[BNX2]: Fix driver software flag namespace.
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Size of the buffer used to hold decompressed firmware images. */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.7.2"
#define DRV_MODULE_RELDATE      "January 21, 2008"

/* Convert a relative delay in jiffies into an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board index into board_info[] below; also stored as driver_data in
 * bnx2_pci_tbl so probe can recover the board name from the PCI match.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
/* Human-readable board names, indexed by board_t above.  The order of
 * entries must match the enum exactly.
 */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* PCI IDs claimed by this driver.  HP OEM boards are listed before the
 * generic PCI_ANY_ID entries for the same device so the more specific
 * subsystem-ID match wins.  The last field is a board_t index.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* NVRAM interface parameters, one entry per recognized flash/EEPROM
 * part.  Each entry ends with page geometry, address mask, total size
 * and a descriptive name; the five leading hex words are controller
 * strapping/command values whose field names are declared in
 * struct flash_spec (bnx2.h) -- NOTE(review): layout of those first
 * five fields is defined outside this chunk; confirm against bnx2.h.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
/* Fixed NVRAM parameters for 5709-family chips; uses designated
 * initializers, so only the fields that matter for this part are set
 * (the five positional command words of flash_table are left zero).
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
226
/* Export the PCI ID table so userspace can autoload the module. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bnapi->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Write a device register indirectly through the PCI config window.
 * The address/data pair is serialized by indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
293 static int
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295 {
296         u32 val1;
297         int i, ret;
298
299         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306                 udelay(40);
307         }
308
309         val1 = (bp->phy_addr << 21) | (reg << 16) |
310                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311                 BNX2_EMAC_MDIO_COMM_START_BUSY;
312         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314         for (i = 0; i < 50; i++) {
315                 udelay(10);
316
317                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319                         udelay(5);
320
321                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324                         break;
325                 }
326         }
327
328         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329                 *val = 0x0;
330                 ret = -EBUSY;
331         }
332         else {
333                 *val = val1;
334                 ret = 0;
335         }
336
337         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344                 udelay(40);
345         }
346
347         return ret;
348 }
349
350 static int
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
352 {
353         u32 val1;
354         int i, ret;
355
356         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
359
360                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362
363                 udelay(40);
364         }
365
366         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
370
371         for (i = 0; i < 50; i++) {
372                 udelay(10);
373
374                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
376                         udelay(5);
377                         break;
378                 }
379         }
380
381         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
382                 ret = -EBUSY;
383         else
384                 ret = 0;
385
386         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
389
390                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
392
393                 udelay(40);
394         }
395
396         return ret;
397 }
398
399 static void
400 bnx2_disable_int(struct bnx2 *bp)
401 {
402         int i;
403         struct bnx2_napi *bnapi;
404
405         for (i = 0; i < bp->irq_nvecs; i++) {
406                 bnapi = &bp->bnx2_napi[i];
407                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
409         }
410         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
411 }
412
413 static void
414 bnx2_enable_int(struct bnx2 *bp)
415 {
416         int i;
417         struct bnx2_napi *bnapi;
418
419         for (i = 0; i < bp->irq_nvecs; i++) {
420                 bnapi = &bp->bnx2_napi[i];
421
422                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
423                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
424                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
425                        bnapi->last_status_idx);
426
427                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
429                        bnapi->last_status_idx);
430         }
431         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
432 }
433
434 static void
435 bnx2_disable_int_sync(struct bnx2 *bp)
436 {
437         int i;
438
439         atomic_inc(&bp->intr_sem);
440         bnx2_disable_int(bp);
441         for (i = 0; i < bp->irq_nvecs; i++)
442                 synchronize_irq(bp->irq_tbl[i].vector);
443 }
444
445 static void
446 bnx2_napi_disable(struct bnx2 *bp)
447 {
448         int i;
449
450         for (i = 0; i < bp->irq_nvecs; i++)
451                 napi_disable(&bp->bnx2_napi[i].napi);
452 }
453
454 static void
455 bnx2_napi_enable(struct bnx2 *bp)
456 {
457         int i;
458
459         for (i = 0; i < bp->irq_nvecs; i++)
460                 napi_enable(&bp->bnx2_napi[i].napi);
461 }
462
463 static void
464 bnx2_netif_stop(struct bnx2 *bp)
465 {
466         bnx2_disable_int_sync(bp);
467         if (netif_running(bp->dev)) {
468                 bnx2_napi_disable(bp);
469                 netif_tx_disable(bp->dev);
470                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
471         }
472 }
473
474 static void
475 bnx2_netif_start(struct bnx2 *bp)
476 {
477         if (atomic_dec_and_test(&bp->intr_sem)) {
478                 if (netif_running(bp->dev)) {
479                         netif_wake_queue(bp->dev);
480                         bnx2_napi_enable(bp);
481                         bnx2_enable_int(bp);
482                 }
483         }
484 }
485
486 static void
487 bnx2_free_mem(struct bnx2 *bp)
488 {
489         int i;
490
491         for (i = 0; i < bp->ctx_pages; i++) {
492                 if (bp->ctx_blk[i]) {
493                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
494                                             bp->ctx_blk[i],
495                                             bp->ctx_blk_mapping[i]);
496                         bp->ctx_blk[i] = NULL;
497                 }
498         }
499         if (bp->status_blk) {
500                 pci_free_consistent(bp->pdev, bp->status_stats_size,
501                                     bp->status_blk, bp->status_blk_mapping);
502                 bp->status_blk = NULL;
503                 bp->stats_blk = NULL;
504         }
505         if (bp->tx_desc_ring) {
506                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507                                     bp->tx_desc_ring, bp->tx_desc_mapping);
508                 bp->tx_desc_ring = NULL;
509         }
510         kfree(bp->tx_buf_ring);
511         bp->tx_buf_ring = NULL;
512         for (i = 0; i < bp->rx_max_ring; i++) {
513                 if (bp->rx_desc_ring[i])
514                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
515                                             bp->rx_desc_ring[i],
516                                             bp->rx_desc_mapping[i]);
517                 bp->rx_desc_ring[i] = NULL;
518         }
519         vfree(bp->rx_buf_ring);
520         bp->rx_buf_ring = NULL;
521         for (i = 0; i < bp->rx_max_pg_ring; i++) {
522                 if (bp->rx_pg_desc_ring[i])
523                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524                                             bp->rx_pg_desc_ring[i],
525                                             bp->rx_pg_desc_mapping[i]);
526                 bp->rx_pg_desc_ring[i] = NULL;
527         }
528         if (bp->rx_pg_ring)
529                 vfree(bp->rx_pg_ring);
530         bp->rx_pg_ring = NULL;
531 }
532
/* Allocate all host memory used by the device: tx/rx rings, optional
 * rx page rings, the combined status + statistics block and, for the
 * 5709, context memory.  Returns 0 on success or -ENOMEM; on failure
 * everything already allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	/* Software shadow of the tx ring (one entry per descriptor). */
	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	/* Hardware tx descriptor ring (DMA-coherent). */
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* Software shadow of the rx rings; vmalloc because the total
	 * size scales with rx_max_ring.
	 */
	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	/* One DMA-coherent descriptor page per rx ring. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Optional rx page rings, only when rx_pg_ring_size is set.
	 * NOTE(review): the consumer of these rings is outside this
	 * chunk; presumably used for page-based rx buffers -- confirm.
	 */
	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* With MSI-X, reserve one aligned status slot per HW vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block; the remaining MSI-X
	 * vectors each get an aligned slice of the same allocation.
	 */
	bp->bnx2_napi[0].status_blk = bp->status_blk;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

			bnapi->status_blk_msix = (void *)
				((unsigned long) bp->status_blk +
				 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			/* Vector number as encoded in int-ack writes. */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	/* 5709 needs host-resident context memory (8kB in page-size chunks). */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
631
632 static void
633 bnx2_report_fw_link(struct bnx2 *bp)
634 {
635         u32 fw_link_status = 0;
636
637         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
638                 return;
639
640         if (bp->link_up) {
641                 u32 bmsr;
642
643                 switch (bp->line_speed) {
644                 case SPEED_10:
645                         if (bp->duplex == DUPLEX_HALF)
646                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
647                         else
648                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
649                         break;
650                 case SPEED_100:
651                         if (bp->duplex == DUPLEX_HALF)
652                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
653                         else
654                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
655                         break;
656                 case SPEED_1000:
657                         if (bp->duplex == DUPLEX_HALF)
658                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
659                         else
660                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
661                         break;
662                 case SPEED_2500:
663                         if (bp->duplex == DUPLEX_HALF)
664                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
665                         else
666                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
667                         break;
668                 }
669
670                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
671
672                 if (bp->autoneg) {
673                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
674
675                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
676                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
677
678                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
679                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
680                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
681                         else
682                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
683                 }
684         }
685         else
686                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
687
688         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
689 }
690
691 static char *
692 bnx2_xceiver_str(struct bnx2 *bp)
693 {
694         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
696                  "Copper"));
697 }
698
/* Log the link state, update the net-device carrier flag, and tell
 * the firmware via bnx2_report_fw_link().  The "Link is Up" message
 * is assembled from several consecutive printk calls, so the order of
 * the calls below is significant.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Append flow control state, if any direction is enabled. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
735
736 static void
737 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
738 {
739         u32 local_adv, remote_adv;
740
741         bp->flow_ctrl = 0;
742         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
743                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
744
745                 if (bp->duplex == DUPLEX_FULL) {
746                         bp->flow_ctrl = bp->req_flow_ctrl;
747                 }
748                 return;
749         }
750
751         if (bp->duplex != DUPLEX_FULL) {
752                 return;
753         }
754
755         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
756             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
757                 u32 val;
758
759                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
760                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
761                         bp->flow_ctrl |= FLOW_CTRL_TX;
762                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
763                         bp->flow_ctrl |= FLOW_CTRL_RX;
764                 return;
765         }
766
767         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
768         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
769
770         if (bp->phy_flags & PHY_SERDES_FLAG) {
771                 u32 new_local_adv = 0;
772                 u32 new_remote_adv = 0;
773
774                 if (local_adv & ADVERTISE_1000XPAUSE)
775                         new_local_adv |= ADVERTISE_PAUSE_CAP;
776                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
777                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
778                 if (remote_adv & ADVERTISE_1000XPAUSE)
779                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
780                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
781                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
782
783                 local_adv = new_local_adv;
784                 remote_adv = new_remote_adv;
785         }
786
787         /* See Table 28B-3 of 802.3ab-1999 spec. */
788         if (local_adv & ADVERTISE_PAUSE_CAP) {
789                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
790                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
791                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
792                         }
793                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
794                                 bp->flow_ctrl = FLOW_CTRL_RX;
795                         }
796                 }
797                 else {
798                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
799                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
800                         }
801                 }
802         }
803         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
804                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
805                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
806
807                         bp->flow_ctrl = FLOW_CTRL_TX;
808                 }
809         }
810 }
811
812 static int
813 bnx2_5709s_linkup(struct bnx2 *bp)
814 {
815         u32 val, speed;
816
817         bp->link_up = 1;
818
819         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
820         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
821         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
822
823         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
824                 bp->line_speed = bp->req_line_speed;
825                 bp->duplex = bp->req_duplex;
826                 return 0;
827         }
828         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
829         switch (speed) {
830                 case MII_BNX2_GP_TOP_AN_SPEED_10:
831                         bp->line_speed = SPEED_10;
832                         break;
833                 case MII_BNX2_GP_TOP_AN_SPEED_100:
834                         bp->line_speed = SPEED_100;
835                         break;
836                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
837                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
838                         bp->line_speed = SPEED_1000;
839                         break;
840                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
841                         bp->line_speed = SPEED_2500;
842                         break;
843         }
844         if (val & MII_BNX2_GP_TOP_AN_FD)
845                 bp->duplex = DUPLEX_FULL;
846         else
847                 bp->duplex = DUPLEX_HALF;
848         return 0;
849 }
850
851 static int
852 bnx2_5708s_linkup(struct bnx2 *bp)
853 {
854         u32 val;
855
856         bp->link_up = 1;
857         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859                 case BCM5708S_1000X_STAT1_SPEED_10:
860                         bp->line_speed = SPEED_10;
861                         break;
862                 case BCM5708S_1000X_STAT1_SPEED_100:
863                         bp->line_speed = SPEED_100;
864                         break;
865                 case BCM5708S_1000X_STAT1_SPEED_1G:
866                         bp->line_speed = SPEED_1000;
867                         break;
868                 case BCM5708S_1000X_STAT1_SPEED_2G5:
869                         bp->line_speed = SPEED_2500;
870                         break;
871         }
872         if (val & BCM5708S_1000X_STAT1_FD)
873                 bp->duplex = DUPLEX_FULL;
874         else
875                 bp->duplex = DUPLEX_HALF;
876
877         return 0;
878 }
879
880 static int
881 bnx2_5706s_linkup(struct bnx2 *bp)
882 {
883         u32 bmcr, local_adv, remote_adv, common;
884
885         bp->link_up = 1;
886         bp->line_speed = SPEED_1000;
887
888         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889         if (bmcr & BMCR_FULLDPLX) {
890                 bp->duplex = DUPLEX_FULL;
891         }
892         else {
893                 bp->duplex = DUPLEX_HALF;
894         }
895
896         if (!(bmcr & BMCR_ANENABLE)) {
897                 return 0;
898         }
899
900         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
902
903         common = local_adv & remote_adv;
904         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
905
906                 if (common & ADVERTISE_1000XFULL) {
907                         bp->duplex = DUPLEX_FULL;
908                 }
909                 else {
910                         bp->duplex = DUPLEX_HALF;
911                 }
912         }
913
914         return 0;
915 }
916
/* Resolve line speed and duplex for a copper PHY with link up.  With
 * autoneg enabled, pick the best mode common to both ends (1000 first,
 * then 100, then 10); with autoneg disabled, read the forced settings
 * straight from BMCR.  Clears link_up if autoneg found no common mode.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* 1000BASE-T: the partner's LPA_1000* bits sit two bit
		 * positions above the local ADVERTISE_1000* bits, hence
		 * the shift before masking.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common 1000 mode: fall back to the 10/100
			 * advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: BMCR holds the forced speed and duplex. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
982
/* Program the EMAC to match the resolved link state: TX lengths, port
 * mode (MII/GMII/2.5G), duplex, and RX/TX pause, then acknowledge the
 * EMAC link-change interrupt.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX lengths register: use a larger value for 1G half duplex
	 * (magic values per Broadcom init recipe).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* On the 5706, 10 Mbps uses the plain
				 * MII port mode (fall through below).
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: default the port mode to GMII. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1049
1050 static void
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1052 {
1053         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1054             (CHIP_NUM(bp) == CHIP_NUM_5709))
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_GP_STATUS);
1057 }
1058
1059 static void
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1061 {
1062         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1063             (CHIP_NUM(bp) == CHIP_NUM_5709))
1064                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1066 }
1067
/* Enable 2.5G advertisement in the PHY's "up1" register if the PHY is
 * 2.5G capable.  Returns 1 if the bit was already set (nothing
 * changed), 0 if it had to be turned on or the PHY is not capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On the 5709 the up1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;	/* bit was clear; we changed it */
	}

	/* Restore the default register block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1096
/* Clear the 2.5G advertisement bit in the PHY's "up1" register.
 * Returns 1 if the bit had been set (a change was made), 0 if it was
 * already clear or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	/* On the 5709 the up1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;	/* bit was set; now cleared */
	}

	/* Restore the default register block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1122
1123 static void
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1125 {
1126         u32 bmcr;
1127
1128         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1129                 return;
1130
1131         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1132                 u32 val;
1133
1134                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1136                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1148         }
1149
1150         if (bp->autoneg & AUTONEG_SPEED) {
1151                 bmcr &= ~BMCR_ANENABLE;
1152                 if (bp->req_duplex == DUPLEX_FULL)
1153                         bmcr |= BMCR_FULLDPLX;
1154         }
1155         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1156 }
1157
1158 static void
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1160 {
1161         u32 bmcr;
1162
1163         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1164                 return;
1165
1166         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1167                 u32 val;
1168
1169                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1171                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1174
1175                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1178
1179         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1182         }
1183
1184         if (bp->autoneg & AUTONEG_SPEED)
1185                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1187 }
1188
1189 static void
1190 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1191 {
1192         u32 val;
1193
1194         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1195         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1196         if (start)
1197                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1198         else
1199                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1200 }
1201
/* Poll the PHY and update the driver's link state (link_up, speed,
 * duplex, flow control), then reprogram the MAC to match.  Caller
 * holds phy_lock.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC or PHY loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* A firmware-managed (remote) PHY is not polled here. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;	/* remember old state to detect changes */

	/* BMSR's link bit is latched-low per the MII spec, so read it
	 * twice to get the current state; on 5709 SerDes the enable/
	 * disable helpers switch to the GP status block around the reads.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: override BMSR's link bit with the EMAC status,
	 * and undo any forced-link-down workaround first.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		if (bp->phy_flags & PHY_FORCED_DOWN_FLAG) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~PHY_FORCED_DOWN_FLAG;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific routine. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting and re-enable
		 * autoneg if parallel detect had disabled it.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
		bp->link_up = 0;
	}

	/* Log only actual transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1279
1280 static int
1281 bnx2_reset_phy(struct bnx2 *bp)
1282 {
1283         int i;
1284         u32 reg;
1285
1286         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1287
1288 #define PHY_RESET_MAX_WAIT 100
1289         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1290                 udelay(10);
1291
1292                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1293                 if (!(reg & BMCR_RESET)) {
1294                         udelay(20);
1295                         break;
1296                 }
1297         }
1298         if (i == PHY_RESET_MAX_WAIT) {
1299                 return -EBUSY;
1300         }
1301         return 0;
1302 }
1303
1304 static u32
1305 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1306 {
1307         u32 adv = 0;
1308
1309         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1310                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1311
1312                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1313                         adv = ADVERTISE_1000XPAUSE;
1314                 }
1315                 else {
1316                         adv = ADVERTISE_PAUSE_CAP;
1317                 }
1318         }
1319         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1320                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1321                         adv = ADVERTISE_1000XPSE_ASYM;
1322                 }
1323                 else {
1324                         adv = ADVERTISE_PAUSE_ASYM;
1325                 }
1326         }
1327         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1328                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1329                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1330                 }
1331                 else {
1332                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1333                 }
1334         }
1335         return adv;
1336 }
1337
1338 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1339
1340 static int
1341 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1342 {
1343         u32 speed_arg = 0, pause_adv;
1344
1345         pause_adv = bnx2_phy_get_pause_adv(bp);
1346
1347         if (bp->autoneg & AUTONEG_SPEED) {
1348                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1349                 if (bp->advertising & ADVERTISED_10baseT_Half)
1350                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1351                 if (bp->advertising & ADVERTISED_10baseT_Full)
1352                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1353                 if (bp->advertising & ADVERTISED_100baseT_Half)
1354                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1355                 if (bp->advertising & ADVERTISED_100baseT_Full)
1356                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1357                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1358                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1359                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1360                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1361         } else {
1362                 if (bp->req_line_speed == SPEED_2500)
1363                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1364                 else if (bp->req_line_speed == SPEED_1000)
1365                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1366                 else if (bp->req_line_speed == SPEED_100) {
1367                         if (bp->req_duplex == DUPLEX_FULL)
1368                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1369                         else
1370                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1371                 } else if (bp->req_line_speed == SPEED_10) {
1372                         if (bp->req_duplex == DUPLEX_FULL)
1373                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1374                         else
1375                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1376                 }
1377         }
1378
1379         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1380                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1381         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1382                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1383
1384         if (port == PORT_TP)
1385                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1386                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1387
1388         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1389
1390         spin_unlock_bh(&bp->phy_lock);
1391         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1392         spin_lock_bh(&bp->phy_lock);
1393
1394         return 0;
1395 }
1396
/* Configure the SerDes PHY according to bp->autoneg / bp->req_*
 * settings.  Remote PHYs are handed to the firmware.  Caller holds
 * phy_lock; it is dropped briefly around the forced-link-down sleep.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Align the 2.5G advertisement bit with the requested
		 * speed; if it had to change, force a renegotiation.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): clears BMCR bit 13;
				 * presumably a 5709 SerDes speed bit —
				 * confirm against the PHY datasheet.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path: advertise 2.5G if capable, 1000X-full if
	 * requested, plus the pause bits, then restart autoneg only
	 * when the advertisement actually changed.
	 */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1511
/* All fibre speeds ethtool may advertise; 2.5G is included only when
 * the PHY is 2.5G capable.  NOTE: expands using a variable named 'bp'
 * at the point of use.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds ethtool may advertise. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement mask for all 10/100 modes plus the CSMA selector. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register advertisement mask. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1526
1527 static void
1528 bnx2_set_default_remote_link(struct bnx2 *bp)
1529 {
1530         u32 link;
1531
1532         if (bp->phy_port == PORT_TP)
1533                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1534         else
1535                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1536
1537         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1538                 bp->req_line_speed = 0;
1539                 bp->autoneg |= AUTONEG_SPEED;
1540                 bp->advertising = ADVERTISED_Autoneg;
1541                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1542                         bp->advertising |= ADVERTISED_10baseT_Half;
1543                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1544                         bp->advertising |= ADVERTISED_10baseT_Full;
1545                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1546                         bp->advertising |= ADVERTISED_100baseT_Half;
1547                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1548                         bp->advertising |= ADVERTISED_100baseT_Full;
1549                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1550                         bp->advertising |= ADVERTISED_1000baseT_Full;
1551                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1552                         bp->advertising |= ADVERTISED_2500baseX_Full;
1553         } else {
1554                 bp->autoneg = 0;
1555                 bp->advertising = 0;
1556                 bp->req_duplex = DUPLEX_FULL;
1557                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1558                         bp->req_line_speed = SPEED_10;
1559                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1560                                 bp->req_duplex = DUPLEX_HALF;
1561                 }
1562                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1563                         bp->req_line_speed = SPEED_100;
1564                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1565                                 bp->req_duplex = DUPLEX_HALF;
1566                 }
1567                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1568                         bp->req_line_speed = SPEED_1000;
1569                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1570                         bp->req_line_speed = SPEED_2500;
1571         }
1572 }
1573
1574 static void
1575 bnx2_set_default_link(struct bnx2 *bp)
1576 {
1577         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1578                 return bnx2_set_default_remote_link(bp);
1579
1580         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1581         bp->req_line_speed = 0;
1582         if (bp->phy_flags & PHY_SERDES_FLAG) {
1583                 u32 reg;
1584
1585                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1586
1587                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1588                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1589                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1590                         bp->autoneg = 0;
1591                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1592                         bp->req_duplex = DUPLEX_FULL;
1593                 }
1594         } else
1595                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1596 }
1597
1598 static void
1599 bnx2_send_heart_beat(struct bnx2 *bp)
1600 {
1601         u32 msg;
1602         u32 addr;
1603
1604         spin_lock(&bp->indirect_lock);
1605         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1606         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1607         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1608         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1609         spin_unlock(&bp->indirect_lock);
1610 }
1611
/* Handle a link event reported by a firmware-managed (remote) PHY.
 * Reads the shared-memory link status word, updates the driver's link
 * state, speed, duplex and flow control from it, and reprograms the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* old state, to detect a change below */
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware sets this bit when it wants a heartbeat acknowledgement. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case intentionally falls through to the matching
		 * *FULL case: it overrides the duplex and then picks up the
		 * same line speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* If both speed and flow control are not autonegotiated,
		 * forced flow control applies (on full duplex only);
		 * otherwise take the negotiated result from the firmware.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the media type changed, re-derive the default link
		 * settings for the new port type.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1690
1691 static int
1692 bnx2_set_remote_link(struct bnx2 *bp)
1693 {
1694         u32 evt_code;
1695
1696         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1697         switch (evt_code) {
1698                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1699                         bnx2_remote_phy_event(bp);
1700                         break;
1701                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1702                 default:
1703                         bnx2_send_heart_beat(bp);
1704                         break;
1705         }
1706         return 0;
1707 }
1708
/* Program the copper PHY for the requested link settings.
 * In autoneg mode, rewrites the advertisement registers and restarts
 * autonegotiation only if something changed; otherwise forces the
 * requested speed/duplex through BMCR.  Always returns 0.
 * Caller holds phy_lock (it is dropped briefly around the msleep below).
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits of the current
		 * advertisements for comparison with the desired values.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if the advertisements changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced mode: build the BMCR value for the requested settings. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop phy_lock while sleeping for the link to drop. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1805
1806 static int
1807 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1808 {
1809         if (bp->loopback == MAC_LOOPBACK)
1810                 return 0;
1811
1812         if (bp->phy_flags & PHY_SERDES_FLAG) {
1813                 return (bnx2_setup_serdes_phy(bp, port));
1814         }
1815         else {
1816                 return (bnx2_setup_copper_phy(bp));
1817         }
1818 }
1819
/* Initialize the 5709 SerDes PHY.  Registers on this PHY are organized
 * in blocks selected through MII_BNX2_BLK_ADDR, and the standard MII
 * registers sit at a +0x10 offset.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers are offset by 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block, then reset. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode, disabling media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the PHY is capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1868
/* Initialize the 5708 SerDes PHY: reset, enable fiber mode with media
 * auto-detection, optionally enable 2.5G, and apply board/revision
 * specific TX tuning.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM config,
	 * but only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1926
/* Initialize the 5706 SerDes PHY.  Adjusts packet-length handling in
 * the PHY depending on whether jumbo frames (MTU > 1500) are in use.
 * The 0x18/0x1c writes are undocumented vendor PHY registers — exact
 * semantics come from Broadcom; do not change without hardware docs.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1963
/* Initialize the copper PHY: reset, apply errata workarounds as flagged
 * in phy_flags, configure extended packet length for jumbo MTU, and
 * enable ethernet@wirespeed.  The raw 0x10/0x15/0x17/0x18 registers are
 * undocumented vendor PHY registers.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC errata workaround: scripted writes through the shadow
	 * register interface (0x17 selects, 0x15 writes).
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via the DSP expansion register when flagged. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2014
2015
/* Reset and initialize the PHY according to chip type, then apply the
 * current link configuration.  Returns 0 on success or the error code
 * from the chip-specific init / setup routine.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default MII register map; the 5709S init overrides these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* A firmware-managed (remote) PHY is not touched directly. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2059
2060 static int
2061 bnx2_set_mac_loopback(struct bnx2 *bp)
2062 {
2063         u32 mac_mode;
2064
2065         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2066         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2067         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2068         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2069         bp->link_up = 1;
2070         return 0;
2071 }
2072
2073 static int bnx2_test_link(struct bnx2 *);
2074
/* Put the PHY into loopback at 1000 Mbps full duplex for the loopback
 * self-test, wait for the link to settle, and force the MAC into GMII
 * mode.  Returns 0 on success or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for up to ~1 second for the loopback link to come up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC loopback / forced-link bits and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2104
/* Post @msg_data to the bootcode driver mailbox and wait for the
 * firmware to acknowledge the matching sequence number.
 * Returns 0 on success (or for WAIT0 messages, unconditionally after
 * posting), -EBUSY on ack timeout (the firmware is told about the
 * timeout), or -EIO if the firmware reports a bad status.
 * @silent suppresses the timeout printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require the full handshake. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2147
/* Initialize the 5709 context memory: trigger the hardware memory init,
 * then program each host context page into the page table, polling for
 * each write request to complete.  Returns 0 on success or -EBUSY if
 * the hardware does not respond in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and kick off its memory init;
	 * bits 16+ carry the page size relative to 256 bytes.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Write the 64-bit DMA address of page i into the host
		 * page table and issue the write request.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to be consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2190
/* Zero-initialize the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips).  On 5706 A0 silicon, certain context IDs are
 * remapped to different physical context addresses — presumably an A0
 * erratum workaround; confirm against Broadcom errata before changing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* Remap CIDs with bit 3 set into the 0x60 range. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each CID's context may span several physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2233
/* Work around bad internal rx buffer memory: allocate every mbuf from
 * the chip's pool, remember the good ones (bit 9 clear), and free only
 * those back — permanently retiring the bad blocks.
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is enough for the chip's entire mbuf pool. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the mbuf value in the format the free register
		 * expects (value duplicated into the high bits, bit 0 set).
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2284
2285 static void
2286 bnx2_set_mac_addr(struct bnx2 *bp)
2287 {
2288         u32 val;
2289         u8 *mac_addr = bp->dev->dev_addr;
2290
2291         val = (mac_addr[0] << 8) | mac_addr[1];
2292
2293         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2294
2295         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2296                 (mac_addr[4] << 8) | mac_addr[5];
2297
2298         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2299 }
2300
/* Allocate and DMA-map a page for the rx page ring slot @index and
 * point the matching rx descriptor at it.  Returns 0 or -ENOMEM.
 * GFP_ATOMIC because this can run from the rx softirq path.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Split the 64-bit DMA address across the descriptor words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2320
2321 static void
2322 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2323 {
2324         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2325         struct page *page = rx_pg->page;
2326
2327         if (!page)
2328                 return;
2329
2330         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2331                        PCI_DMA_FROMDEVICE);
2332
2333         __free_page(page);
2334         rx_pg->page = NULL;
2335 }
2336
/* Allocate and DMA-map an skb for rx ring slot @index, align its data
 * pointer, fill in the rx descriptor, and advance the producer byte
 * sequence.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Track total bytes posted to the ring for the producer update. */
	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2367
2368 static int
2369 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2370 {
2371         struct status_block *sblk = bnapi->status_blk;
2372         u32 new_link_state, old_link_state;
2373         int is_set = 1;
2374
2375         new_link_state = sblk->status_attn_bits & event;
2376         old_link_state = sblk->status_attn_bits_ack & event;
2377         if (new_link_state != old_link_state) {
2378                 if (new_link_state)
2379                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2380                 else
2381                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2382         } else
2383                 is_set = 0;
2384
2385         return is_set;
2386 }
2387
/* Service PHY-related attention events from the status block: a link
 * state change is handled under phy_lock; a timer-abort event is used
 * by the firmware to signal a remote-PHY event.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2400
2401 static inline u16
2402 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2403 {
2404         u16 cons;
2405
2406         if (bnapi->int_num == 0)
2407                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2408         else
2409                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2410
2411         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2412                 cons++;
2413         return cons;
2414 }
2415
/* Reclaim completed tx descriptors up to the hardware consumer index,
 * unmapping and freeing each skb, up to @budget packets.  Wakes the tx
 * queue if it was stopped and enough descriptors are now free.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the last BD belonging to this packet;
			 * account for the skipped link entry when the
			 * packet wraps past the end of a ring page.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the hardware has not completed all BDs
			 * of this packet yet (signed 16-bit wraparound
			 * compare).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each paged fragment. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up any completions that arrived while we worked. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with a concurrent
	 * bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2497
/* Recycle @count rx page-ring entries back to the hardware producer
 * without allocating new pages.  Used on error paths when a replacement
 * skb or page could not be allocated.
 *
 * If @skb is non-NULL, its last page fragment is detached, re-mapped
 * for DMA and placed back at the current consumer slot before the skb
 * itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bnapi->rx_pg_prod, prod;
	u16 cons = bnapi->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Steal the skb's last fragment page and give it
			 * back to the consumer ring entry, re-mapped for
			 * device access.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page, its DMA mapping and the BD
			 * address from the consumer to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bnapi->rx_pg_prod = hw_prod;
	bnapi->rx_pg_cons = cons;
}
2547
/* Put an rx buffer back on the ring at @prod so the hardware can reuse
 * it, instead of passing it up the stack.  Called for bad frames and
 * when a replacement buffer could not be allocated.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the header area back to the device; only the first
	 * rx_offset + RX_COPY_THRESH bytes were synced for the CPU.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and BD address are already in place. */
	if (cons == prod)
		return;

	/* Transfer the DMA mapping and BD address to the producer slot. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2577
/* Finish receiving one packet into @skb.  @ring_idx packs the consumer
 * index in the high 16 bits and the producer index in the low 16 bits.
 * @hdr_len is non-zero when the packet is split between the linear
 * buffer and page-ring fragments; @len is the packet length, with 4
 * more bytes (CRC) following it in the buffers.
 *
 * Returns 0 on success or a negative errno; on failure the buffers are
 * recycled back to the rings and the packet is dropped.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Post a fresh buffer at the producer slot before consuming this
	 * one; if that fails, recycle the current skb (and the page-ring
	 * entries it would have used) instead.
	 */
	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;	/* include CRC */
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bnapi->rx_pg_cons;
		u16 pg_prod = bnapi->rx_pg_prod;

		/* First hdr_len bytes are in the linear buffer; the rest
		 * (plus the 4 CRC bytes) is spread over page fragments.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Remaining bytes are CRC only: trim any
				 * CRC bytes already counted into the skb
				 * and recycle the rest of the pages
				 * without attaching them.
				 */
				unsigned int tail = 4 - frag_len;

				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Strip the 4-byte CRC from the final fragment. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Post a replacement page; on failure give this
			 * skb's pages back to the ring and drop the packet.
			 */
			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bnapi->rx_pg_prod = pg_prod;
		bnapi->rx_pg_cons = pg_cons;
	}
	return 0;
}
2669
2670 static inline u16
2671 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2672 {
2673         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2674
2675         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2676                 cons++;
2677         return cons;
2678 }
2679
/* NAPI receive processing: drain completed RX descriptors, passing up
 * to @budget packets to the stack.  Packets at or below rx_copy_thresh
 * are copied into a small fresh skb so the original ring buffer can be
 * reused in place; larger ones go through bnx2_rx_skb(), which may
 * attach page-ring fragments.  Returns the number of packets received.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bnapi->rx_cons;
	sw_prod = bnapi->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame-header area for the CPU; the rest
		 * is unmapped later if the packet is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with length and status. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Recycle the buffer and drop frames flagged bad. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header-split frame: the chip reports the header
			 * length in the l2_fhdr_ip_xsum field.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		len -= 4;	/* strip the trailing CRC */

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* The original buffer goes straight back on the
			 * ring; the copy is what goes up the stack.
			 */
			bnx2_reuse_rx_skb(bp, bnapi, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless tagged (0x8100 is the VLAN
		 * ethertype), which accounts for the extra length.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results for TCP/UDP frames. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bnapi->rx_cons = sw_cons;
	bnapi->rx_prod = sw_prod;

	/* Publish the new producer indices / byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bnapi->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2825
2826 /* MSI ISR - The only difference between this and the INTx ISR
2827  * is that the MSI interrupt is always serviced.
2828  */
2829 static irqreturn_t
2830 bnx2_msi(int irq, void *dev_instance)
2831 {
2832         struct net_device *dev = dev_instance;
2833         struct bnx2 *bp = netdev_priv(dev);
2834         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2835
2836         prefetch(bnapi->status_blk);
2837         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2838                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2839                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2840
2841         /* Return here if interrupt is disabled. */
2842         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2843                 return IRQ_HANDLED;
2844
2845         netif_rx_schedule(dev, &bnapi->napi);
2846
2847         return IRQ_HANDLED;
2848 }
2849
2850 static irqreturn_t
2851 bnx2_msi_1shot(int irq, void *dev_instance)
2852 {
2853         struct net_device *dev = dev_instance;
2854         struct bnx2 *bp = netdev_priv(dev);
2855         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2856
2857         prefetch(bnapi->status_blk);
2858
2859         /* Return here if interrupt is disabled. */
2860         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2861                 return IRQ_HANDLED;
2862
2863         netif_rx_schedule(dev, &bnapi->napi);
2864
2865         return IRQ_HANDLED;
2866 }
2867
/* Legacy INTx interrupt handler; the IRQ line may be shared with
 * other devices, so the handler must detect foreign interrupts.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* no new events; likely not ours */

	/* Ack the interrupt and keep further ones masked while NAPI runs. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		/* We own the poll: record how far the hw has posted. */
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2907
2908 static irqreturn_t
2909 bnx2_tx_msix(int irq, void *dev_instance)
2910 {
2911         struct net_device *dev = dev_instance;
2912         struct bnx2 *bp = netdev_priv(dev);
2913         struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2914
2915         prefetch(bnapi->status_blk_msix);
2916
2917         /* Return here if interrupt is disabled. */
2918         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2919                 return IRQ_HANDLED;
2920
2921         netif_rx_schedule(dev, &bnapi->napi);
2922         return IRQ_HANDLED;
2923 }
2924
2925 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2926                                  STATUS_ATTN_BITS_TIMER_ABORT)
2927
2928 static inline int
2929 bnx2_has_work(struct bnx2_napi *bnapi)
2930 {
2931         struct status_block *sblk = bnapi->status_blk;
2932
2933         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2934             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2935                 return 1;
2936
2937         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2938             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2939                 return 1;
2940
2941         return 0;
2942 }
2943
/* NAPI poll handler for the TX-only MSI-X vector: reap TX completions
 * until the ring is drained or @budget is exhausted, then complete
 * NAPI and re-arm the interrupt for this vector.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk_msix;

	do {
		work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
		if (unlikely(work_done >= budget))
			return work_done;

		/* Record the status index before re-checking for work, so
		 * a completion arriving in between re-triggers the IRQ.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
	} while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);

	netif_rx_complete(bp->dev, napi);
	/* Ack up to last_status_idx on this vector. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       bnapi->last_status_idx);
	return work_done;
}
2966
/* One pass of NAPI work: handle link/attention events, reap TX
 * completions, then receive up to the remaining budget.  Returns the
 * updated work_done count (only RX work counts against the budget).
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Attention bits that differ from their ack copies are pending. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}

	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
2995
/* Main NAPI poll handler.  Loops until the budget is exhausted or no
 * work remains, then completes NAPI and re-enables the interrupt.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI/MSI-X: a single ack write re-arms. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with interrupts
			 * still masked, then write again without the mask
			 * bit to re-enable them.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3037
/* Program the chip's RX filters (promiscuous, all-multicast, multicast
 * hash, VLAN tag keeping) from dev->flags and the device multicast
 * list.  Called with rtnl_lock from vlan functions and also
 * netif_tx_lock from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no vlan group is registered and ASF
	 * is not enabled; otherwise the hardware strips them.
	 */
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low byte of the little-endian CRC selects one
			 * of 256 hash bits: top 3 bits pick the register,
			 * bottom 5 the bit within it.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC mode register if something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the USER0 sort rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3112
3113 static void
3114 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3115         u32 rv2p_proc)
3116 {
3117         int i;
3118         u32 val;
3119
3120
3121         for (i = 0; i < rv2p_code_len; i += 8) {
3122                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
3123                 rv2p_code++;
3124                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
3125                 rv2p_code++;
3126
3127                 if (rv2p_proc == RV2P_PROC1) {
3128                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3129                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3130                 }
3131                 else {
3132                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3133                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3134                 }
3135         }
3136
3137         /* Reset the processor, un-stall is done later. */
3138         if (rv2p_proc == RV2P_PROC1) {
3139                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3140         }
3141         else {
3142                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3143         }
3144 }
3145
/* Download one firmware image to an on-chip CPU: halt the CPU, copy
 * the text/data/sbss/bss/rodata sections into its scratchpad window,
 * set the program counter and restart it.  The text section may be
 * zlib-compressed (fw->gz_text) and is inflated into fw->text first.
 *
 * Returns 0 on success or a negative zlib_inflate_blob() error.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		/* NOTE(review): text words go through cpu_to_le32() but
		 * the data/rodata loops below do not -- confirm this
		 * asymmetry is intended on big-endian hosts.
		 */
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
3227
/* Download and start all on-chip firmware: the two RV2P (receive
 * vector-to-processor) sequences and the RXP/TXP/TPAT/COM/CP RISC
 * processors.  Firmware images are stored zlib-compressed in bnx2_fw.h /
 * bnx2_fw2.h and are inflated one at a time into a temporary vmalloc
 * scratch buffer.
 *
 * Returns 0 on success, -ENOMEM if the scratch buffer cannot be
 * allocated, or a negative error from decompression/firmware loading.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	/* 5709 ("Xi") parts take a different firmware image than 5706/5708. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	/* zlib_inflate_blob() returns the decompressed length on success. */
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor.
	 * Each RISC processor is described by the same cpu_reg template,
	 * filled with that processor's register block addresses.
	 */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	/* Presumably the MIPS virtual-address base the firmware is linked
	 * at, subtracted when translating to chip addresses -- confirm
	 * against load_cpu_fw().
	 */
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* NOTE(review): fw->text points into the scratch buffer that is
	 * vfreed below; load_cpu_fw() presumably consumes it synchronously
	 * -- confirm, otherwise this dangles.
	 */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);

init_cpu_err:
	vfree(text);
	return rc;
}
3388
/* Move the device between PCI power states D0 and D3hot.
 *
 * For D0: clears any pending PME status, waits out the D3hot->D0
 * transition delay, and disables the magic-packet/ACPI wake logic.
 *
 * For D3hot: if Wake-on-LAN is enabled, reprograms the PHY/MAC for a
 * low-speed WOL link, arms magic-packet reception, notifies the
 * bootcode, and sets PME_ENABLE before writing the new power state.
 *
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Force state to D0 and clear PME status (write-1-to-clear). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wake frames and turn magic-packet
		 * detection back off for normal operation.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save autoneg settings; they are restored after
			 * the PHY has been set up for the WOL link.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* Copper: advertise only 10/100 for the low-power
			 * wake link.
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort user 0 for broadcast/multicast
			 * reception; written as 0, value, value|ENA in
			 * sequence -- presumably a required programming
			 * order, confirm against chip documentation.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending, unless WOL is
		 * known broken on this board.
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 are only placed in D3hot (state bits = 3)
		 * when WOL is enabled -- presumably a chip erratum on
		 * those revisions, confirm.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3525
3526 static int
3527 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3528 {
3529         u32 val;
3530         int j;
3531
3532         /* Request access to the flash interface. */
3533         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3534         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3535                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3536                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3537                         break;
3538
3539                 udelay(5);
3540         }
3541
3542         if (j >= NVRAM_TIMEOUT_COUNT)
3543                 return -EBUSY;
3544
3545         return 0;
3546 }
3547
3548 static int
3549 bnx2_release_nvram_lock(struct bnx2 *bp)
3550 {
3551         int j;
3552         u32 val;
3553
3554         /* Relinquish nvram interface. */
3555         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3556
3557         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3558                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3559                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3560                         break;
3561
3562                 udelay(5);
3563         }
3564
3565         if (j >= NVRAM_TIMEOUT_COUNT)
3566                 return -EBUSY;
3567
3568         return 0;
3569 }
3570
3571
3572 static int
3573 bnx2_enable_nvram_write(struct bnx2 *bp)
3574 {
3575         u32 val;
3576
3577         val = REG_RD(bp, BNX2_MISC_CFG);
3578         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3579
3580         if (bp->flash_info->flags & BNX2_NV_WREN) {
3581                 int j;
3582
3583                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3584                 REG_WR(bp, BNX2_NVM_COMMAND,
3585                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3586
3587                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3588                         udelay(5);
3589
3590                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3591                         if (val & BNX2_NVM_COMMAND_DONE)
3592                                 break;
3593                 }
3594
3595                 if (j >= NVRAM_TIMEOUT_COUNT)
3596                         return -EBUSY;
3597         }
3598         return 0;
3599 }
3600
3601 static void
3602 bnx2_disable_nvram_write(struct bnx2 *bp)
3603 {
3604         u32 val;
3605
3606         val = REG_RD(bp, BNX2_MISC_CFG);
3607         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3608 }
3609
3610
3611 static void
3612 bnx2_enable_nvram_access(struct bnx2 *bp)
3613 {
3614         u32 val;
3615
3616         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3617         /* Enable both bits, even on read. */
3618         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3619                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3620 }
3621
3622 static void
3623 bnx2_disable_nvram_access(struct bnx2 *bp)
3624 {
3625         u32 val;
3626
3627         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3628         /* Disable both bits, even after read. */
3629         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3630                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3631                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3632 }
3633
3634 static int
3635 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3636 {
3637         u32 cmd;
3638         int j;
3639
3640         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3641                 /* Buffered flash, no erase needed */
3642                 return 0;
3643
3644         /* Build an erase command */
3645         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3646               BNX2_NVM_COMMAND_DOIT;
3647
3648         /* Need to clear DONE bit separately. */
3649         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3650
3651         /* Address of the NVRAM to read from. */
3652         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3653
3654         /* Issue an erase command. */
3655         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3656
3657         /* Wait for completion. */
3658         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3659                 u32 val;
3660
3661                 udelay(5);
3662
3663                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3664                 if (val & BNX2_NVM_COMMAND_DONE)
3665                         break;
3666         }
3667
3668         if (j >= NVRAM_TIMEOUT_COUNT)
3669                 return -EBUSY;
3670
3671         return 0;
3672 }
3673
3674 static int
3675 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3676 {
3677         u32 cmd;
3678         int j;
3679
3680         /* Build the command word. */
3681         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3682
3683         /* Calculate an offset of a buffered flash, not needed for 5709. */
3684         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3685                 offset = ((offset / bp->flash_info->page_size) <<
3686                            bp->flash_info->page_bits) +
3687                           (offset % bp->flash_info->page_size);
3688         }
3689
3690         /* Need to clear DONE bit separately. */
3691         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3692
3693         /* Address of the NVRAM to read from. */
3694         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3695
3696         /* Issue a read command. */
3697         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3698
3699         /* Wait for completion. */
3700         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3701                 u32 val;
3702
3703                 udelay(5);
3704
3705                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3706                 if (val & BNX2_NVM_COMMAND_DONE) {
3707                         val = REG_RD(bp, BNX2_NVM_READ);
3708
3709                         val = be32_to_cpu(val);
3710                         memcpy(ret_val, &val, 4);
3711                         break;
3712                 }
3713         }
3714         if (j >= NVRAM_TIMEOUT_COUNT)
3715                 return -EBUSY;
3716
3717         return 0;
3718 }
3719
3720
3721 static int
3722 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3723 {
3724         u32 cmd, val32;
3725         int j;
3726
3727         /* Build the command word. */
3728         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3729
3730         /* Calculate an offset of a buffered flash, not needed for 5709. */
3731         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3732                 offset = ((offset / bp->flash_info->page_size) <<
3733                           bp->flash_info->page_bits) +
3734                          (offset % bp->flash_info->page_size);
3735         }
3736
3737         /* Need to clear DONE bit separately. */
3738         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3739
3740         memcpy(&val32, val, 4);
3741         val32 = cpu_to_be32(val32);
3742
3743         /* Write the data. */
3744         REG_WR(bp, BNX2_NVM_WRITE, val32);
3745
3746         /* Address of the NVRAM to write to. */
3747         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3748
3749         /* Issue the write command. */
3750         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3751
3752         /* Wait for completion. */
3753         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3754                 udelay(5);
3755
3756                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3757                         break;
3758         }
3759         if (j >= NVRAM_TIMEOUT_COUNT)
3760                 return -EBUSY;
3761
3762         return 0;
3763 }
3764
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * On the 5709 a single fixed flash spec is used.  On older chips the
 * part is identified from the NVM_CFG1 strapping bits, and -- if the
 * interface has not yet been reconfigured by firmware -- the matching
 * entry's configuration is programmed into the NVM config registers.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set means firmware already reconfigured the flash
	 * interface -- presumably; confirm against chip documentation.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap set. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop exhausting without a break leaves j == entry_count,
	 * which signals an unrecognized part.
	 */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared hardware config; fall back
	 * to the table entry's total size when it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3847
3848 static int
3849 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3850                 int buf_size)
3851 {
3852         int rc = 0;
3853         u32 cmd_flags, offset32, len32, extra;
3854
3855         if (buf_size == 0)
3856                 return 0;
3857
3858         /* Request access to the flash interface. */
3859         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3860                 return rc;
3861
3862         /* Enable access to flash interface */
3863         bnx2_enable_nvram_access(bp);
3864
3865         len32 = buf_size;
3866         offset32 = offset;
3867         extra = 0;
3868
3869         cmd_flags = 0;
3870
3871         if (offset32 & 3) {
3872                 u8 buf[4];
3873                 u32 pre_len;
3874
3875                 offset32 &= ~3;
3876                 pre_len = 4 - (offset & 3);
3877
3878                 if (pre_len >= len32) {
3879                         pre_len = len32;
3880                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3881                                     BNX2_NVM_COMMAND_LAST;
3882                 }
3883                 else {
3884                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3885                 }
3886
3887                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3888
3889                 if (rc)
3890                         return rc;
3891
3892                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3893
3894                 offset32 += 4;
3895                 ret_buf += pre_len;
3896                 len32 -= pre_len;
3897         }
3898         if (len32 & 3) {
3899                 extra = 4 - (len32 & 3);
3900                 len32 = (len32 + 4) & ~3;
3901         }
3902
3903         if (len32 == 4) {
3904                 u8 buf[4];
3905
3906                 if (cmd_flags)
3907                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3908                 else
3909                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3910                                     BNX2_NVM_COMMAND_LAST;
3911
3912                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3913
3914                 memcpy(ret_buf, buf, 4 - extra);
3915         }
3916         else if (len32 > 0) {
3917                 u8 buf[4];
3918
3919                 /* Read the first word. */
3920                 if (cmd_flags)
3921                         cmd_flags = 0;
3922                 else
3923                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3924
3925                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3926
3927                 /* Advance to the next dword. */
3928                 offset32 += 4;
3929                 ret_buf += 4;
3930                 len32 -= 4;
3931
3932                 while (len32 > 4 && rc == 0) {
3933                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3934
3935                         /* Advance to the next dword. */
3936                         offset32 += 4;
3937                         ret_buf += 4;
3938                         len32 -= 4;
3939                 }
3940
3941                 if (rc)
3942                         return rc;
3943
3944                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3945                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3946
3947                 memcpy(ret_buf, buf, 4 - extra);
3948         }
3949
3950         /* Disable access to flash interface */
3951         bnx2_disable_nvram_access(bp);
3952
3953         bnx2_release_nvram_lock(bp);
3954
3955         return rc;
3956 }
3957
/* Write an arbitrary byte range from @data_buf to NVRAM.
 *
 * Unaligned head/tail bytes are handled by reading the surrounding
 * dwords first and merging (@align_buf).  Non-buffered flash parts are
 * written a full page at a time: the page is read into @flash_buffer,
 * erased, and rewritten with the untouched portions restored.  The
 * NVRAM lock is acquired and released once per page.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * error from the underlying NVRAM operations.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range to the dword boundary and
	 * fetch the existing leading bytes.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen the range and fetch the existing
	 * trailing bytes.
	 */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge old head/tail bytes with the caller's data into one
	 * dword-aligned buffer.
	 */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Scratch buffer for a whole page; 264 bytes presumably covers
	 * the largest non-buffered page size in flash_table -- confirm.
	 */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unallocated buffers are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4137
/* Detect firmware-managed ("remote") PHY support and, if present, adopt
 * the firmware's view of the link.  Only applicable to SerDes devices.
 * Sets or clears REMOTE_PHY_CAP_FLAG in bp->phy_flags and may update
 * bp->phy_port, bp->link_up and the netif carrier state.
 * Caller context: called under bp->phy_lock (see bnx2_reset_chip).
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* Validate the capability mailbox signature before trusting any
	 * of its capability bits. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		/* Firmware owns the PHY; derive the port type from the
		 * link status it reports. */
		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (netif_running(bp->dev)) {
			u32 sig;

			/* Bring the software carrier state in sync with
			 * the firmware-reported link state. */
			if (val & BNX2_LINK_STATUS_LINK_UP) {
				bp->link_up = 1;
				netif_carrier_on(bp->dev);
			} else {
				bp->link_up = 0;
				netif_carrier_off(bp->dev);
			}
			/* Acknowledge to the firmware that the driver
			 * will operate in remote PHY mode. */
			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   sig);
		}
	}
}
4177
/* Map the MSI-X table and PBA into the separate GRC windows so the
 * host can access them through the register BAR. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch the GRC window to separate-window mode first. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4186
/* Perform a coordinated chip reset.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value telling the firmware why the
 *	reset is happening (normal reset, suspend, WOL, diag, ...).
 *
 * The sequence is: quiesce DMA, handshake with firmware (WAIT0),
 * issue the core reset (chip-family specific), verify byte swapping,
 * wait for firmware init to finish (WAIT1), then re-probe remote PHY
 * state.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register; the read
		 * back flushes the posted write before the delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Restore config via PCI config space; the register BAR
		 * may not be usable immediately after reset. */
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips self-clear CORE_RST_REQ when done. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Remote PHY capability may have changed across the reset; if the
	 * reported port type changed, reapply the default link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The separate GRC windows for MSI-X must be reprogrammed after
	 * every reset. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4291
/* Bring the chip from post-reset state to fully operational: DMA config,
 * internal CPUs/firmware, contexts, MAC address, MTU, and host coalescing
 * parameters.  Finishes with the WAIT2 firmware handshake and enables all
 * blocks.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* One-wait-state PCI-X read tuning at 133 MHz.
	 * NOTE(review): bit meanings for (1 << 23) / (0x2 << 20) / (1 << 11)
	 * come from the hardware spec, not visible here. */
	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* Disable PCI-X relaxed ordering (ERO) for correctness. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 erratum workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the host coalescing block where the status and statistics
	 * blocks live in host memory. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing: each register packs the interrupt-mode
	 * value in the high 16 bits and the normal value in the low 16. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 does not support the timer-mode bits. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		/* Per-vector status block 1 carries the TX completions. */
		REG_WR(bp, BNX2_HC_SB_CONFIG_1,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP_1,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, BNX2_HC_TX_TICKS_1,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final firmware handshake; then enable all chip blocks.  The
	 * read back flushes the posted enable write. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4489
4490 static void
4491 bnx2_clear_ring_states(struct bnx2 *bp)
4492 {
4493         struct bnx2_napi *bnapi;
4494         int i;
4495
4496         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4497                 bnapi = &bp->bnx2_napi[i];
4498
4499                 bnapi->tx_cons = 0;
4500                 bnapi->hw_tx_cons = 0;
4501                 bnapi->rx_prod_bseq = 0;
4502                 bnapi->rx_prod = 0;
4503                 bnapi->rx_cons = 0;
4504                 bnapi->rx_pg_prod = 0;
4505                 bnapi->rx_pg_cons = 0;
4506         }
4507 }
4508
/* Program the L2 TX context for connection @cid: context type, command
 * type, and the host DMA address of the TX BD chain.  The 5709 uses a
 * different set of context offsets (the _XI variants) than older chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	/* NOTE(review): (8 << 16) presumably encodes the BD chain page
	 * count/size field of the command type -- confirm against spec. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* High then low half of the 64-bit TX BD base address. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
4537
/* Initialize the TX ring: choose the CID and MSI-X vector, link the
 * last BD back to the start of the chain, reset the producer indices,
 * cache the doorbell mailbox addresses, and program the TX context.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;

	bp->tx_vec = 0;
	/* With MSI-X, TX completions use a dedicated CID and vector. */
	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cid = TX_TSS_CID;
		bp->tx_vec = BNX2_TX_VEC;
		REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
		       (TX_TSS_CID << 7));
	}
	bnapi = &bp->bnx2_napi[bp->tx_vec];

	/* Wake the queue once at least half the ring is free again. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The last BD in the ring is the chain BD pointing back to the
	 * beginning of the (single-page) TX BD chain. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_prod_bseq = 0;

	/* Doorbell mailbox addresses for producer index and byte count. */
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
4569
4570 static void
4571 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4572                      int num_rings)
4573 {
4574         int i;
4575         struct rx_bd *rxbd;
4576
4577         for (i = 0; i < num_rings; i++) {
4578                 int j;
4579
4580                 rxbd = &rx_ring[i][0];
4581                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4582                         rxbd->rx_bd_len = buf_size;
4583                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4584                 }
4585                 if (i == (num_rings - 1))
4586                         j = 0;
4587                 else
4588                         j = i + 1;
4589                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4590                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4591         }
4592 }
4593
/* Initialize the RX rings: build the normal BD chain (and the page BD
 * chain when jumbo pages are in use), program the RX L2 context, fill
 * the rings with buffers, and publish the initial producer indices to
 * the chip mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	/* Zero page-buffer size first; non-zero only in jumbo mode. */
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo mode: build the page-BD chain and tell the chip
		 * about the header buffer size, page size and key. */
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* NOTE(review): 0x02 << 8 presumably selects the context sub-type;
	 * confirm against the L2CTX_CTX_TYPE register definition. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post pages to the page ring; stop early on allocation failure
	 * (a partially filled ring is still usable). */
	ring_prod = prod = bnapi->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bnapi->rx_pg_prod = prod;

	/* Post skbs to the normal RX ring, likewise best-effort. */
	ring_prod = prod = bnapi->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bnapi->rx_prod = prod;

	/* Publish the producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
		 bnapi->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4661
4662 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4663 {
4664         u32 max, num_rings = 1;
4665
4666         while (ring_size > MAX_RX_DESC_CNT) {
4667                 ring_size -= MAX_RX_DESC_CNT;
4668                 num_rings++;
4669         }
4670         /* round to next power of 2 */
4671         max = max_size;
4672         while ((max & num_rings) == 0)
4673                 max >>= 1;
4674
4675         if (num_rings != max)
4676                 max <<= 1;
4677
4678         return max;
4679 }
4680
/* Compute all RX buffer and ring sizing fields in @bp for the current
 * MTU and the requested ring size @size.  When the required buffer no
 * longer fits in one page (and the chip supports it), switch to jumbo
 * mode: small header buffers on the normal ring plus a page ring for
 * the payload.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

	/* Actual skb footprint: aligned data plus skb overhead. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per packet payload (the 40 presumably
		 * accounts for headers kept in the small buffer --
		 * confirm against spec). */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In jumbo mode the normal ring holds only headers. */
		rx_size = RX_COPY_THRESH + bp->rx_offset;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4719
/* Unmap and free every skb still pending on the TX ring.  Each skb
 * occupies one head BD plus one BD per page fragment, so the index
 * advances by nr_frags + 1 per skb.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Head BD maps the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Following BDs map the page fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head BD and all fragment BDs. */
		i += j + 1;
	}

}
4756
4757 static void
4758 bnx2_free_rx_skbs(struct bnx2 *bp)
4759 {
4760         int i;
4761
4762         if (bp->rx_buf_ring == NULL)
4763                 return;
4764
4765         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4766                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4767                 struct sk_buff *skb = rx_buf->skb;
4768
4769                 if (skb == NULL)
4770                         continue;
4771
4772                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4773                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4774
4775                 rx_buf->skb = NULL;
4776
4777                 dev_kfree_skb(skb);
4778         }
4779         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4780                 bnx2_free_rx_page(bp, i);
4781 }
4782
/* Release all socket buffers still held on the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4789
/* Reset the chip and rebuild all rings from scratch.  Returns 0 on
 * success or the error from the chip reset/init steps. */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Free pending skbs even if the reset failed, so buffers are
	 * never leaked; only then bail out on error. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_clear_ring_states(bp);
	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
4808
/* Full NIC bring-up: reset/reinit the hardware, then initialize the
 * PHY and establish the link.  PHY operations run under phy_lock.
 * Returns 0 on success or a negative errno from the reset path. */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
4823
4824 static int
4825 bnx2_test_registers(struct bnx2 *bp)
4826 {
4827         int ret;
4828         int i, is_5709;
4829         static const struct {
4830                 u16   offset;
4831                 u16   flags;
4832 #define BNX2_FL_NOT_5709        1
4833                 u32   rw_mask;
4834                 u32   ro_mask;
4835         } reg_tbl[] = {
4836                 { 0x006c, 0, 0x00000000, 0x0000003f },
4837                 { 0x0090, 0, 0xffffffff, 0x00000000 },
4838                 { 0x0094, 0, 0x00000000, 0x00000000 },
4839
4840                 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4841                 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4842                 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4843                 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4844                 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4845                 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4846                 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4847                 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4848                 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4849
4850                 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4851                 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4852                 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4853                 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4854                 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4855                 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4856
4857                 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4858                 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4859                 { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
4860
4861                 { 0x1000, 0, 0x00000000, 0x00000001 },
4862                 { 0x1004, 0, 0x00000000, 0x000f0001 },
4863
4864                 { 0x1408, 0, 0x01c00800, 0x00000000 },
4865                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4866                 { 0x14a8, 0, 0x00000000, 0x000001ff },
4867                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4868                 { 0x14b0, 0, 0x00000002, 0x00000001 },
4869                 { 0x14b8, 0, 0x00000000, 0x00000000 },
4870                 { 0x14c0, 0, 0x00000000, 0x00000009 },
4871                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4872                 { 0x14cc, 0, 0x00000000, 0x00000001 },
4873                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4874
4875                 { 0x1800, 0, 0x00000000, 0x00000001 },
4876                 { 0x1804, 0, 0x00000000, 0x00000003 },
4877
4878                 { 0x2800, 0, 0x00000000, 0x00000001 },
4879                 { 0x2804, 0, 0x00000000, 0x00003f01 },
4880                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4881                 { 0x2810, 0, 0xffff0000, 0x00000000 },
4882                 { 0x2814, 0, 0xffff0000, 0x00000000 },
4883                 { 0x2818, 0, 0xffff0000, 0x00000000 },
4884                 { 0x281c, 0, 0xffff0000, 0x00000000 },
4885                 { 0x2834, 0, 0xffffffff, 0x00000000 },
4886                 { 0x2840, 0, 0x00000000, 0xffffffff },
4887                 { 0x2844, 0, 0x00000000, 0xffffffff },
4888                 { 0x2848, 0, 0xffffffff, 0x00000000 },
4889                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4890
4891                 { 0x2c00, 0, 0x00000000, 0x00000011 },
4892                 { 0x2c04, 0, 0x00000000, 0x00030007 },
4893
4894                 { 0x3c00, 0, 0x00000000, 0x00000001 },
4895                 { 0x3c04, 0, 0x00000000, 0x00070000 },
4896                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4897                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4898                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4899                 { 0x3c14, 0, 0x00000000, 0xffffffff },
4900                 { 0x3c18, 0, 0x00000000, 0xffffffff },
4901                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4902                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4903
4904                 { 0x5004, 0, 0x00000000, 0x0000007f },
4905                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4906
4907                 { 0x5c00, 0, 0x00000000, 0x00000001 },
4908                 { 0x5c04, 0, 0x00000000, 0x0003000f },
4909                 { 0x5c08, 0, 0x00000003, 0x00000000 },
4910                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4911                 { 0x5c10, 0, 0x00000000, 0xffffffff },
4912                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4913                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4914                 { 0x5c88, 0, 0x00000000, 0x00077373 },
4915                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4916
4917                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4918                 { 0x680c, 0, 0xffffffff, 0x00000000 },
4919                 { 0x6810, 0, 0xffffffff, 0x00000000 },
4920                 { 0x6814, 0, 0xffffffff, 0x00000000 },
4921                 { 0x6818, 0, 0xffffffff, 0x00000000 },
4922                 { 0x681c, 0, 0xffffffff, 0x00000000 },
4923                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4924                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4925                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4926                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4927                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4928                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4929                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4930                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4931                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4932                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4933                 { 0x684c, 0, 0xffffffff, 0x00000000 },
4934                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4935                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4936                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4937                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4938                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4939                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4940
4941                 { 0xffff, 0, 0x00000000, 0x00000000 },
4942         };
4943
4944         ret = 0;
4945         is_5709 = 0;
4946         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4947                 is_5709 = 1;
4948
4949         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4950                 u32 offset, rw_mask, ro_mask, save_val, val;
4951                 u16 flags = reg_tbl[i].flags;
4952
4953                 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4954                         continue;
4955
4956                 offset = (u32) reg_tbl[i].offset;
4957                 rw_mask = reg_tbl[i].rw_mask;
4958                 ro_mask = reg_tbl[i].ro_mask;
4959
4960                 save_val = readl(bp->regview + offset);
4961
4962                 writel(0, bp->regview + offset);
4963
4964                 val = readl(bp->regview + offset);
4965                 if ((val & rw_mask) != 0) {
4966                         goto reg_test_err;
4967                 }
4968
4969                 if ((val & ro_mask) != (save_val & ro_mask)) {
4970                         goto reg_test_err;
4971                 }
4972
4973                 writel(0xffffffff, bp->regview + offset);
4974
4975                 val = readl(bp->regview + offset);
4976                 if ((val & rw_mask) != rw_mask) {
4977                         goto reg_test_err;
4978                 }
4979
4980                 if ((val & ro_mask) != (save_val & ro_mask)) {
4981                         goto reg_test_err;
4982                 }
4983
4984                 writel(save_val, bp->regview + offset);
4985                 continue;
4986
4987 reg_test_err:
4988                 writel(save_val, bp->regview + offset);
4989                 ret = -ENODEV;
4990                 break;
4991         }
4992         return ret;
4993 }
4994
4995 static int
4996 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4997 {
4998         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4999                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5000         int i;
5001
5002         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5003                 u32 offset;
5004
5005                 for (offset = 0; offset < size; offset += 4) {
5006
5007                         REG_WR_IND(bp, start + offset, test_pattern[i]);
5008
5009                         if (REG_RD_IND(bp, start + offset) !=
5010                                 test_pattern[i]) {
5011                                 return -ENODEV;
5012                         }
5013                 }
5014         }
5015         return 0;
5016 }
5017
5018 static int
5019 bnx2_test_memory(struct bnx2 *bp)
5020 {
5021         int ret = 0;
5022         int i;
5023         static struct mem_entry {
5024                 u32   offset;
5025                 u32   len;
5026         } mem_tbl_5706[] = {
5027                 { 0x60000,  0x4000 },
5028                 { 0xa0000,  0x3000 },
5029                 { 0xe0000,  0x4000 },
5030                 { 0x120000, 0x4000 },
5031                 { 0x1a0000, 0x4000 },
5032                 { 0x160000, 0x4000 },
5033                 { 0xffffffff, 0    },
5034         },
5035         mem_tbl_5709[] = {
5036                 { 0x60000,  0x4000 },
5037                 { 0xa0000,  0x3000 },
5038                 { 0xe0000,  0x4000 },
5039                 { 0x120000, 0x4000 },
5040                 { 0x1a0000, 0x4000 },
5041                 { 0xffffffff, 0    },
5042         };
5043         struct mem_entry *mem_tbl;
5044
5045         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5046                 mem_tbl = mem_tbl_5709;
5047         else
5048                 mem_tbl = mem_tbl_5706;
5049
5050         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5051                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5052                         mem_tbl[i].len)) != 0) {
5053                         return ret;
5054                 }
5055         }
5056
5057         return ret;
5058 }
5059
5060 #define BNX2_MAC_LOOPBACK       0
5061 #define BNX2_PHY_LOOPBACK       1
5062
/* Run one loopback iteration: transmit a single self-addressed test frame
 * and verify it is received back intact.
 *
 * @bp:            driver context
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK
 *
 * Returns 0 if the frame comes back with no error bits, correct length and
 * matching payload; -ENODEV on any check failure; -EINVAL for an unknown
 * mode; -ENOMEM if the test skb cannot be allocated.
 *
 * Must be called with the NIC quiesced (no normal traffic on the rings) —
 * it manipulates tx_prod and reads the first RX buffer directly.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;

	/* With MSI-X, TX completions land on a separate vector/napi. */
	tx_napi = bnapi;
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback cannot be tested through a remote PHY;
		 * report success rather than failing the diagnostic.
		 */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Largest frame that still fits in a single (non-jumbo-split)
	 * RX buffer; the -4 leaves room for the appended CRC.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Dest MAC = our own address so the loopback frame is accepted;
	 * payload is a counting pattern checked byte-for-byte on RX.
	 */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force an immediate status-block update so we can snapshot the
	 * RX consumer index before transmitting.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand-build one TX descriptor for the frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status update to pick up the TX/RX completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have been fully consumed ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts RX completions must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The hardware prepends an l2_fhdr status header to the frame. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error bit fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: reported length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5188
5189 #define BNX2_MAC_LOOPBACK_FAILED        1
5190 #define BNX2_PHY_LOOPBACK_FAILED        2
5191 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5192                                          BNX2_PHY_LOOPBACK_FAILED)
5193
5194 static int
5195 bnx2_test_loopback(struct bnx2 *bp)
5196 {
5197         int rc = 0;
5198
5199         if (!netif_running(bp->dev))
5200                 return BNX2_LOOPBACK_FAILED;
5201
5202         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5203         spin_lock_bh(&bp->phy_lock);
5204         bnx2_init_phy(bp);
5205         spin_unlock_bh(&bp->phy_lock);
5206         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5207                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5208         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5209                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5210         return rc;
5211 }
5212
5213 #define NVRAM_SIZE 0x200
5214 #define CRC32_RESIDUAL 0xdebb20e3
5215
5216 static int
5217 bnx2_test_nvram(struct bnx2 *bp)
5218 {
5219         u32 buf[NVRAM_SIZE / 4];
5220         u8 *data = (u8 *) buf;
5221         int rc = 0;
5222         u32 magic, csum;
5223
5224         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5225                 goto test_nvram_done;
5226
5227         magic = be32_to_cpu(buf[0]);
5228         if (magic != 0x669955aa) {
5229                 rc = -ENODEV;
5230                 goto test_nvram_done;
5231         }
5232
5233         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5234                 goto test_nvram_done;
5235
5236         csum = ether_crc_le(0x100, data);
5237         if (csum != CRC32_RESIDUAL) {
5238                 rc = -ENODEV;
5239                 goto test_nvram_done;
5240         }
5241
5242         csum = ether_crc_le(0x100, data + 0x100);
5243         if (csum != CRC32_RESIDUAL) {
5244                 rc = -ENODEV;
5245         }
5246
5247 test_nvram_done:
5248         return rc;
5249 }
5250
5251 static int
5252 bnx2_test_link(struct bnx2 *bp)
5253 {
5254         u32 bmsr;
5255
5256         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5257                 if (bp->link_up)
5258                         return 0;
5259                 return -ENODEV;
5260         }
5261         spin_lock_bh(&bp->phy_lock);
5262         bnx2_enable_bmsr1(bp);
5263         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5264         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5265         bnx2_disable_bmsr1(bp);
5266         spin_unlock_bh(&bp->phy_lock);
5267
5268         if (bmsr & BMSR_LSTATUS) {
5269                 return 0;
5270         }
5271         return -ENODEV;
5272 }
5273
5274 static int
5275 bnx2_test_intr(struct bnx2 *bp)
5276 {
5277         int i;
5278         u16 status_idx;
5279
5280         if (!netif_running(bp->dev))
5281                 return -ENODEV;
5282
5283         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5284
5285         /* This register is not touched during run-time. */
5286         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5287         REG_RD(bp, BNX2_HC_COMMAND);
5288
5289         for (i = 0; i < 10; i++) {
5290                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5291                         status_idx) {
5292
5293                         break;
5294                 }
5295
5296                 msleep_interruptible(10);
5297         }
5298         if (i < 10)
5299                 return 0;
5300
5301         return -ENODEV;
5302 }
5303
/* Probe whether a 5706 SerDes link partner is present without autoneg.
 *
 * Reads three PHY shadow/expansion registers and returns 1 only if:
 *   - signal detect is asserted,
 *   - the autoneg debug register shows sync, and
 *   - we are not receiving CONFIG codewords (RUDI_C clear).
 *
 * Each status register is read twice; presumably the first read returns a
 * latched value and the second the current state (standard MII latching
 * convention) — NOTE(review): confirm against the PHY datasheet.
 *
 * Caller must hold phy_lock.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No electrical signal on the wire -> definitely no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Receiver not synchronized to the incoming bit stream. */
	if (an_dbg & MISC_SHDW_AN_DBG_NOSYNC)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5331
/* Periodic (timer-context) SerDes maintenance for the 5706.
 *
 * Implements parallel detection: if autoneg never completes but a link
 * partner is detected, force 1000/full; if the partner later starts
 * autonegotiating (per PHY regs 0x17/0x15), re-enable autoneg.  Also
 * force-drops a link whose receiver has lost sync so the state machine
 * can restart.  All PHY access is done under phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	/* A previous tick force-dropped the link; undo that now and wait
	 * for the next tick before doing anything else.
	 */
	if (bp->phy_flags & PHY_FORCED_DOWN_FLAG) {
		bnx2_5706s_force_link_dn(bp, 0);
		bp->phy_flags &= ~PHY_FORCED_DOWN_FLAG;
		spin_unlock(&bp->phy_lock);
		return;
	}

	if (bp->serdes_an_pending) {
		/* Autoneg grace period still counting down. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg stuck but a partner is present: force
			 * 1000/full (parallel detect).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		check_link = 0;
		/* Partner has begun autonegotiating (reg 0x15 bit 5 via
		 * shadow 0x17=0x0f01): go back to autoneg ourselves.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	/* Link claims to be up: verify the receiver still has sync; if
	 * not, force the link down for one tick to restart negotiation.
	 */
	if (bp->link_up && (bp->autoneg & AUTONEG_SPEED) && check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (val & MISC_SHDW_AN_DBG_NOSYNC) {
			bnx2_5706s_force_link_dn(bp, 1);
			bp->phy_flags |= PHY_FORCED_DOWN_FLAG;
		}
	}
	spin_unlock(&bp->phy_lock);
}
5397
/* Periodic (timer-context) SerDes maintenance for the 5708.
 *
 * Only relevant for 2.5G-capable SerDes with local PHY control: while
 * autoneg has not produced a link, alternate between forcing 2.5G and
 * re-enabling autoneg, giving each mode a couple of timer ticks via
 * serdes_an_pending / current_interval.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* PHY is managed by remote firmware — nothing to do locally. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still inside the grace period for the current mode. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg failed to link: try forced 2.5G with a
			 * shorter timeout before the next flip.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G failed too: back to autoneg and
			 * give it two extra ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5430
/* Driver heartbeat timer (runs every bp->current_interval jiffies).
 *
 * Sends the firmware keep-alive, refreshes the firmware RX-drop counter
 * in the stats block, applies the 5708 stats-corruption workaround, and
 * runs the per-chip SerDes maintenance state machine.  Skipped (but
 * rearmed) while interrupts are soft-disabled via intr_sem.
 *
 * @data: the struct bnx2 * cast to unsigned long (timer API).
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	/* Interface went down: let the timer die (no rearm). */
	if (!netif_running(bp->dev))
		return;

	/* Device is being reset/reconfigured — just rearm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5461
5462 static int
5463 bnx2_request_irq(struct bnx2 *bp)
5464 {
5465         struct net_device *dev = bp->dev;
5466         unsigned long flags;
5467         struct bnx2_irq *irq;
5468         int rc = 0, i;
5469
5470         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5471                 flags = 0;
5472         else
5473                 flags = IRQF_SHARED;
5474
5475         for (i = 0; i < bp->irq_nvecs; i++) {
5476                 irq = &bp->irq_tbl[i];
5477                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5478                                  dev);
5479                 if (rc)
5480                         break;
5481                 irq->requested = 1;
5482         }
5483         return rc;
5484 }
5485
5486 static void
5487 bnx2_free_irq(struct bnx2 *bp)
5488 {
5489         struct net_device *dev = bp->dev;
5490         struct bnx2_irq *irq;
5491         int i;
5492
5493         for (i = 0; i < bp->irq_nvecs; i++) {
5494                 irq = &bp->irq_tbl[i];
5495                 if (irq->requested)
5496                         free_irq(irq->vector, dev);
5497                 irq->requested = 0;
5498         }
5499         if (bp->flags & BNX2_FLAG_USING_MSI)
5500                 pci_disable_msi(bp->pdev);
5501         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5502                 pci_disable_msix(bp->pdev);
5503
5504         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5505 }
5506
5507 static void
5508 bnx2_enable_msix(struct bnx2 *bp)
5509 {
5510         int i, rc;
5511         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5512
5513         bnx2_setup_msix_tbl(bp);
5514         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5515         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5516         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5517
5518         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5519                 msix_ent[i].entry = i;
5520                 msix_ent[i].vector = 0;
5521         }
5522
5523         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5524         if (rc != 0)
5525                 return;
5526
5527         bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5528         bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5529
5530         strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5531         strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5532         strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5533         strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5534
5535         bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5536         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5537         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5538                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5539 }
5540
/* Choose the interrupt mode (MSI-X, MSI, or legacy INTx) and populate
 * irq_tbl accordingly.
 *
 * @bp:      driver context
 * @dis_msi: non-zero forces legacy INTx (used as fallback after the MSI
 *           self-test fails)
 *
 * Starts from the legacy INTx defaults, then upgrades to MSI-X if the
 * chip advertises it, else to MSI.  On 5709, MSI runs in one-shot mode
 * with a dedicated handler.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	/* Legacy INTx defaults; overwritten below if MSI/MSI-X succeeds. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp);

	/* MSI only if MSI-X was not (or could not be) enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}
}
5566
/* Called with rtnl_lock */
/* net_device_ops .ndo_open: bring the interface up.
 *
 * Order: power up, mask interrupts, allocate rings/memory, pick the
 * interrupt mode, enable NAPI, request vectors, init the NIC, start the
 * heartbeat timer, then enable interrupts.  Each failure path unwinds
 * exactly what was set up before it.  If MSI was chosen, an interrupt
 * self-test is run; on failure the driver falls back to INTx and
 * re-initializes.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* No carrier until link-up is reported. */
	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi = 1: force legacy INTx this time. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_start_queue(dev);

	return 0;
}
5649
/* Workqueue handler behind bnx2_tx_timeout(): full chip re-init.
 *
 * in_reset_task fences against bnx2_close(), which busy-waits on this
 * flag instead of flushing the workqueue (see the deadlock comment in
 * bnx2_close).  intr_sem is left at 1 so bnx2_netif_start() performs the
 * final interrupt re-enable.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* Interface already down — nothing to reset. */
	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5667
/* net_device_ops .ndo_tx_timeout: the TX watchdog fired.
 *
 * Runs in softirq-ish watchdog context, so the actual reset is deferred
 * to the bnx2_reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5676
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: attach/detach the VLAN group.
 *
 * Traffic is stopped while vlgrp is swapped and the RX mode is
 * reprogrammed (VLAN stripping configuration lives in the RX mode).
 * @vlgrp may be NULL to disable VLAN acceleration.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5692
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* net_device_ops .ndo_start_xmit: map an skb and post it on the TX ring.
 *
 * Builds one tx_bd for the linear head and one per page fragment, encodes
 * checksum-offload, VLAN tag, and TSO (IPv4 and IPv6 variants) into the
 * descriptor flags, then rings the doorbell (bidx + bseq).  Stops the
 * queue when the ring can no longer hold a maximally-fragmented skb.
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is unexpectedly
 * full while the queue was awake (a bug, logged as such).
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];

	/* Queue should have been stopped before the ring filled; getting
	 * here with too little room is a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* VLAN tag rides in the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: extension-header length (distance from
			 * a plain ipv6hdr to the TCP header) is encoded in
			 * the TCP6_OFF0/2/4 descriptor fields, split across
			 * vlan_tag_flags and mss.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: headers are rewritten in place, so a
			 * cloned header block must be un-shared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Pre-compute per-segment tot_len and the TCP
			 * pseudo-header checksum for the hardware.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* IP options + TCP options length (in words) go in
			 * bits 8+ of the flags word.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First descriptor: the linear part of the skb. */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last descriptor of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Doorbell: publish the new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when a worst-case skb no longer fits; re-check
	 * under the stopped state to close the race with bnx2_tx_int().
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5833
5834 /* Called with rtnl_lock */
5835 static int
5836 bnx2_close(struct net_device *dev)
5837 {
5838         struct bnx2 *bp = netdev_priv(dev);
5839         u32 reset_code;
5840
5841         /* Calling flush_scheduled_work() may deadlock because
5842          * linkwatch_event() may be on the workqueue and it will try to get
5843          * the rtnl_lock which we are holding.
5844          */
5845         while (bp->in_reset_task)
5846                 msleep(1);
5847
5848         bnx2_disable_int_sync(bp);
5849         bnx2_napi_disable(bp);
5850         del_timer_sync(&bp->timer);
5851         if (bp->flags & BNX2_FLAG_NO_WOL)
5852                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5853         else if (bp->wol)
5854                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5855         else
5856                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5857         bnx2_reset_chip(bp, reset_code);
5858         bnx2_free_irq(bp);
5859         bnx2_free_skbs(bp);
5860         bnx2_free_mem(bp);
5861         bp->link_up = 0;
5862         netif_carrier_off(bp->dev);
5863         bnx2_set_power_state(bp, PCI_D3hot);
5864         return 0;
5865 }
5866
/* Combine the hi/lo halves of a 64-bit hardware counter into an
 * unsigned long.  The expansion is fully parenthesized so it stays
 * correct inside a larger expression (the original unparenthesized
 * form mis-associated under operators tighter than '+').
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

/* On 32-bit, only the low half of the counter fits in unsigned long. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5879
5880 static struct net_device_stats *
5881 bnx2_get_stats(struct net_device *dev)
5882 {
5883         struct bnx2 *bp = netdev_priv(dev);
5884         struct statistics_block *stats_blk = bp->stats_blk;
5885         struct net_device_stats *net_stats = &bp->net_stats;
5886
5887         if (bp->stats_blk == NULL) {
5888                 return net_stats;
5889         }
5890         net_stats->rx_packets =
5891                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5892                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5893                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5894
5895         net_stats->tx_packets =
5896                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5897                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5898                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5899
5900         net_stats->rx_bytes =
5901                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5902
5903         net_stats->tx_bytes =
5904                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5905
5906         net_stats->multicast =
5907                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5908
5909         net_stats->collisions =
5910                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5911
5912         net_stats->rx_length_errors =
5913                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5914                 stats_blk->stat_EtherStatsOverrsizePkts);
5915
5916         net_stats->rx_over_errors =
5917                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5918
5919         net_stats->rx_frame_errors =
5920                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5921
5922         net_stats->rx_crc_errors =
5923                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5924
5925         net_stats->rx_errors = net_stats->rx_length_errors +
5926                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5927                 net_stats->rx_crc_errors;
5928
5929         net_stats->tx_aborted_errors =
5930                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5931                 stats_blk->stat_Dot3StatsLateCollisions);
5932
5933         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5934             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5935                 net_stats->tx_carrier_errors = 0;
5936         else {
5937                 net_stats->tx_carrier_errors =
5938                         (unsigned long)
5939                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
5940         }
5941
5942         net_stats->tx_errors =
5943                 (unsigned long)
5944                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5945                 +
5946                 net_stats->tx_aborted_errors +
5947                 net_stats->tx_carrier_errors;
5948
5949         net_stats->rx_missed_errors =
5950                 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5951                 stats_blk->stat_FwRxDrop);
5952
5953         return net_stats;
5954 }
5955
5956 /* All ethtool functions called with rtnl_lock */
5957
5958 static int
5959 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5960 {
5961         struct bnx2 *bp = netdev_priv(dev);
5962         int support_serdes = 0, support_copper = 0;
5963
5964         cmd->supported = SUPPORTED_Autoneg;
5965         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5966                 support_serdes = 1;
5967                 support_copper = 1;
5968         } else if (bp->phy_port == PORT_FIBRE)
5969                 support_serdes = 1;
5970         else
5971                 support_copper = 1;
5972
5973         if (support_serdes) {
5974                 cmd->supported |= SUPPORTED_1000baseT_Full |
5975                         SUPPORTED_FIBRE;
5976                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5977                         cmd->supported |= SUPPORTED_2500baseX_Full;
5978
5979         }
5980         if (support_copper) {
5981                 cmd->supported |= SUPPORTED_10baseT_Half |
5982                         SUPPORTED_10baseT_Full |
5983                         SUPPORTED_100baseT_Half |
5984                         SUPPORTED_100baseT_Full |
5985                         SUPPORTED_1000baseT_Full |
5986                         SUPPORTED_TP;
5987
5988         }
5989
5990         spin_lock_bh(&bp->phy_lock);
5991         cmd->port = bp->phy_port;
5992         cmd->advertising = bp->advertising;
5993
5994         if (bp->autoneg & AUTONEG_SPEED) {
5995                 cmd->autoneg = AUTONEG_ENABLE;
5996         }
5997         else {
5998                 cmd->autoneg = AUTONEG_DISABLE;
5999         }
6000
6001         if (netif_carrier_ok(dev)) {
6002                 cmd->speed = bp->line_speed;
6003                 cmd->duplex = bp->duplex;
6004         }
6005         else {
6006                 cmd->speed = -1;
6007                 cmd->duplex = -1;
6008         }
6009         spin_unlock_bh(&bp->phy_lock);
6010
6011         cmd->transceiver = XCVR_INTERNAL;
6012         cmd->phy_address = bp->phy_addr;
6013
6014         return 0;
6015 }
6016
6017 static int
6018 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6019 {
6020         struct bnx2 *bp = netdev_priv(dev);
6021         u8 autoneg = bp->autoneg;
6022         u8 req_duplex = bp->req_duplex;
6023         u16 req_line_speed = bp->req_line_speed;
6024         u32 advertising = bp->advertising;
6025         int err = -EINVAL;
6026
6027         spin_lock_bh(&bp->phy_lock);
6028
6029         if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6030                 goto err_out_unlock;
6031
6032         if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
6033                 goto err_out_unlock;
6034
6035         if (cmd->autoneg == AUTONEG_ENABLE) {
6036                 autoneg |= AUTONEG_SPEED;
6037
6038                 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6039
6040                 /* allow advertising 1 speed */
6041                 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6042                         (cmd->advertising == ADVERTISED_10baseT_Full) ||
6043                         (cmd->advertising == ADVERTISED_100baseT_Half) ||
6044                         (cmd->advertising == ADVERTISED_100baseT_Full)) {
6045
6046                         if (cmd->port == PORT_FIBRE)
6047                                 goto err_out_unlock;
6048
6049                         advertising = cmd->advertising;
6050
6051                 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6052          &nb