/*
 * (scrape artifacts from the git web viewer — not part of the driver)
 * pkt_sched: Rename PSCHED_US2NS and PSCHED_NS2US
 * [linux-2.6.git] / drivers / net / bnx2.c
 */
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #include "bnx2.h"
54 #include "bnx2_fw.h"
55
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "2.0.1"
#define DRV_MODULE_RELDATE      "May 6, 2009"
/* Firmware image names requested from userspace (see MODULE_FIRMWARE below). */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-4.6.16.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-4.6.16.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-4.6.17.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-4.6.15.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);

/* Module parameter: set non-zero to force INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
86
/* Board identifiers; the order must match board_info[] below and the
 * driver_data field of bnx2_pci_tbl.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
100
/* indexed by board_t, above */
static struct {
        char *name;     /* marketing name printed at probe time */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
117
/* PCI IDs handled by this driver.  The HP OEM boards share Broadcom
 * device IDs and are distinguished by subsystem vendor/device, so the
 * more specific HP entries must come before the PCI_ANY_ID wildcards.
 * driver_data holds the board_t index.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
143
/* Supported NVRAM/flash devices.  Each entry carries five raw register
 * values (presumably strapping match plus NVRAM config values — see
 * struct flash_spec in bnx2.h), followed by flags, geometry
 * (page bits/size, byte address mask, total size) and a name.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
232
/* The 5709 family has a single fixed flash layout, so it bypasses the
 * strap-matched flash_table above.
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
243
244 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
245 {
246         u32 diff;
247
248         smp_mb();
249
250         /* The ring uses 256 indices for 255 entries, one of them
251          * needs to be skipped.
252          */
253         diff = txr->tx_prod - txr->tx_cons;
254         if (unlikely(diff >= TX_DESC_CNT)) {
255                 diff &= 0xffff;
256                 if (diff == TX_DESC_CNT)
257                         diff = MAX_TX_DESC_CNT;
258         }
259         return (bp->tx_ring_size - diff);
260 }
261
/* Read a device register indirectly through the PCICFG register window.
 * The window-address write and the data read must not interleave with
 * another indirect access, hence the indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
273
/* Write @val to a device register indirectly through the PCICFG
 * register window, serialized by indirect_lock (see bnx2_reg_rd_ind).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
282
/* Write @val into the firmware shared-memory region at @offset
 * (relative to bp->shmem_base) via the indirect register window.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
288
289 static u32
290 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
291 {
292         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
293 }
294
/* Write @val into context memory for context @cid_addr at @offset.
 * On 5709 the write goes through the CTX data/ctrl register pair and
 * is polled to completion; older chips use a simple address/data pair.
 * Serialized with other indirect accesses by indirect_lock.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Poll (up to 5 * 5us) for the chip to clear WRITE_REQ. */
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
318
/* Read PHY register @reg over the EMAC MDIO interface.
 * On success stores the 16-bit register value in *val and returns 0;
 * on timeout stores 0 and returns -EBUSY.
 * Hardware autopolling shares the MDIO_COMM interface, so it is turned
 * off for the duration of the access and restored afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* read back to flush */

                udelay(40);
        }

        /* Start the read: PHY address, register number, READ command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion, up to 50 * 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        /* If START_BUSY is still set the access timed out. */
        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore autopolling if it was enabled on entry. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
375
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 * Returns 0 on success or -EBUSY if the access does not complete.
 * As in bnx2_read_phy(), autopolling is suspended around the access.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* read back to flush */

                udelay(40);
        }

        /* Start the write: PHY address, register, data, WRITE command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion, up to 50 * 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore autopolling if it was enabled on entry. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
424
/* Mask hardware interrupts on every vector.  The trailing read of the
 * int-ack register flushes the posted writes to the device.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        }
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
438
/* Re-enable hardware interrupts on every vector.  For each vector the
 * first write acks up to last_status_idx with interrupts still masked;
 * the second write (without MASK_INT) unmasks.  COAL_NOW then forces a
 * coalescing pass so pending events raise a fresh interrupt.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
459
/* Disable interrupts and wait for any in-flight handlers to finish.
 * intr_sem is raised first (its decrement in bnx2_netif_start() gates
 * re-enabling), then each vector's IRQ is synchronized.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
470
471 static void
472 bnx2_napi_disable(struct bnx2 *bp)
473 {
474         int i;
475
476         for (i = 0; i < bp->irq_nvecs; i++)
477                 napi_disable(&bp->bnx2_napi[i].napi);
478 }
479
480 static void
481 bnx2_napi_enable(struct bnx2 *bp)
482 {
483         int i;
484
485         for (i = 0; i < bp->irq_nvecs; i++)
486                 napi_enable(&bp->bnx2_napi[i].napi);
487 }
488
/* Stop all traffic: mask interrupts (and wait for handlers), then stop
 * NAPI and the TX queues.  trans_start is refreshed so the netdev
 * watchdog does not see a stale timestamp and report a bogus TX
 * timeout while the device is intentionally stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
499
500 static void
501 bnx2_netif_start(struct bnx2 *bp)
502 {
503         if (atomic_dec_and_test(&bp->intr_sem)) {
504                 if (netif_running(bp->dev)) {
505                         netif_tx_wake_all_queues(bp->dev);
506                         bnx2_napi_enable(bp);
507                         bnx2_enable_int(bp);
508                 }
509         }
510 }
511
512 static void
513 bnx2_free_tx_mem(struct bnx2 *bp)
514 {
515         int i;
516
517         for (i = 0; i < bp->num_tx_rings; i++) {
518                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
519                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
520
521                 if (txr->tx_desc_ring) {
522                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
523                                             txr->tx_desc_ring,
524                                             txr->tx_desc_mapping);
525                         txr->tx_desc_ring = NULL;
526                 }
527                 kfree(txr->tx_buf_ring);
528                 txr->tx_buf_ring = NULL;
529         }
530 }
531
532 static void
533 bnx2_free_rx_mem(struct bnx2 *bp)
534 {
535         int i;
536
537         for (i = 0; i < bp->num_rx_rings; i++) {
538                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
539                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
540                 int j;
541
542                 for (j = 0; j < bp->rx_max_ring; j++) {
543                         if (rxr->rx_desc_ring[j])
544                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
545                                                     rxr->rx_desc_ring[j],
546                                                     rxr->rx_desc_mapping[j]);
547                         rxr->rx_desc_ring[j] = NULL;
548                 }
549                 vfree(rxr->rx_buf_ring);
550                 rxr->rx_buf_ring = NULL;
551
552                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
553                         if (rxr->rx_pg_desc_ring[j])
554                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
555                                                     rxr->rx_pg_desc_ring[j],
556                                                     rxr->rx_pg_desc_mapping[j]);
557                         rxr->rx_pg_desc_ring[j] = NULL;
558                 }
559                 vfree(rxr->rx_pg_ring);
560                 rxr->rx_pg_ring = NULL;
561         }
562 }
563
564 static int
565 bnx2_alloc_tx_mem(struct bnx2 *bp)
566 {
567         int i;
568
569         for (i = 0; i < bp->num_tx_rings; i++) {
570                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
571                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
572
573                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
574                 if (txr->tx_buf_ring == NULL)
575                         return -ENOMEM;
576
577                 txr->tx_desc_ring =
578                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
579                                              &txr->tx_desc_mapping);
580                 if (txr->tx_desc_ring == NULL)
581                         return -ENOMEM;
582         }
583         return 0;
584 }
585
/* Allocate RX memory for every RX ring: the vmalloc'ed software rings
 * and the per-page DMA descriptor rings, plus the optional page-ring
 * structures when rx_pg_ring_size is non-zero.  Returns 0 or -ENOMEM;
 * on failure the caller cleans up via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                memset(rxr->rx_buf_ring, 0,
                       SW_RXBD_RING_SIZE * bp->rx_max_ring);

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                     &rxr->rx_desc_mapping[j]);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;

                }

                /* Page ring is only used when jumbo-sized buffers need it. */
                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                               bp->rx_max_pg_ring);
                }

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                &rxr->rx_pg_desc_mapping[j]);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;

                }
        }
        return 0;
}
634
/* Free everything bnx2_alloc_mem() allocated: TX/RX rings, 5709 host
 * context pages and the combined status + statistics block.  Tolerates
 * partially completed allocation (used on the alloc_mem_err path).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        /* Host context pages (5709 only; ctx_pages is 0 otherwise). */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status and statistics blocks share one DMA allocation,
         * anchored by vector 0's status block pointer.
         */
        if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
660
/* Allocate all host memory the device needs: the combined status +
 * statistics DMA block (sliced per MSI-X vector when available), the
 * 5709 host context pages, and the RX/TX rings.  Returns 0 or -ENOMEM;
 * on any failure everything allocated so far is released.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block. */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        /* Each additional MSI-X vector gets its own
                         * aligned slice of the status block area.
                         */
                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block follows the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 keeps 8KB of context in host memory. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        /* bnx2_free_mem() tolerates partially completed allocation. */
        bnx2_free_mem(bp);
        return -ENOMEM;
}
737
/* Report the current link state (speed/duplex, autoneg status) to the
 * bootcode via the shared-memory BNX2_LINK_STATUS word.  Skipped when
 * a remote firmware-managed PHY owns the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* Read BMSR twice — some of its bits are
                         * latched, so only the second read reflects
                         * the current state.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
796
797 static char *
798 bnx2_xceiver_str(struct bnx2 *bp)
799 {
800         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
801                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
802                  "Copper"));
803 }
804
/* Log the link state change (speed, duplex, flow control), update the
 * netdev carrier state, and forward the state to firmware via
 * bnx2_report_fw_link().  The message is assembled from several
 * printk() calls on one line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
841
/* Resolve the pause (flow control) configuration into bp->flow_ctrl.
 * If speed or flow-control autonegotiation is disabled, the requested
 * setting is applied directly; otherwise the result is derived from
 * the local and link-partner pause advertisements.  Pause is only
 * honored on full duplex links.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Not fully autonegotiating: use the forced/requested setting. */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause state
	 * directly in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000BASE-X pause bits into the copper-style
	 * PAUSE_CAP/PAUSE_ASYM bits so the resolution logic below can
	 * be shared between SerDes and copper.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
917
/* Record link-up state for the 5709 SerDes PHY: read the resolved
 * speed/duplex from the GP_STATUS block (the block address register
 * is restored to COMBO_IEEEB0 afterwards).  When speed autoneg is
 * off, the requested speed/duplex is used instead.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* GP_TOP_AN_STATUS1 lives in the GP_STATUS block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
956
/* Record link-up state for the 5708 SerDes PHY by decoding the
 * resolved speed and duplex from the 1000X status register.
 * Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
985
/* Record link-up state for the 5706 SerDes PHY.  The 5706 SerDes only
 * runs at 1000 Mbps; duplex comes from the forced BMCR bit, refined
 * by the common 1000X advertisement bits when autoneg is enabled.
 * Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: the BMCR duplex bit is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Autoneg: use the highest duplex both sides advertised. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1022
/* Record link speed and duplex for a copper PHY.  With autoneg
 * enabled, the result is the highest ability common to the local and
 * link-partner advertisements (1000, then 100, then 10, full before
 * half); if nothing matches, the link is marked down.  With autoneg
 * off, the forced BMCR speed/duplex bits are used.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* 1000BASE-T abilities first. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The LP 1000BASE-T bits in STAT1000 sit two bits above
		 * the corresponding CTRL1000 advertisement bits; shift
		 * down so they line up for the AND.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* Fall back to the 10/100 advertisement registers. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1088
/* Program the L2 RX context for the given CID.  On the 5709 the
 * context word also carries the flow-control watermarks, which are
 * scaled down from the RX ring size; the low watermark is disabled
 * when TX pause is off or the ring is too small for the defaults.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;	/* NOTE(review): undocumented field; from vendor code */

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* Watermarks that cannot fit in the ring are disabled. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; clamp, and disable the low
		 * watermark when the high one scales to zero.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1124
1125 static void
1126 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1127 {
1128         int i;
1129         u32 cid;
1130
1131         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1132                 if (i == 1)
1133                         cid = RX_RSS_CID;
1134                 bnx2_init_rx_context(bp, cid);
1135         }
1136 }
1137
/* Reprogram the EMAC to match the current link state: TX lengths,
 * port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.  Also
 * acks the EMAC link-change interrupt and, on the 5709, rewrites the
 * RX contexts whose watermarks depend on the flow-control state.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* 0x2620/0x26ff are TX inter-frame gap/slot-time values from
	 * the vendor; the larger slot time is needed for 1G half
	 * duplex — NOTE(review): confirm against Broadcom docs.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1205
1206 static void
1207 bnx2_enable_bmsr1(struct bnx2 *bp)
1208 {
1209         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1210             (CHIP_NUM(bp) == CHIP_NUM_5709))
1211                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1212                                MII_BNX2_BLK_ADDR_GP_STATUS);
1213 }
1214
1215 static void
1216 bnx2_disable_bmsr1(struct bnx2 *bp)
1217 {
1218         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1219             (CHIP_NUM(bp) == CHIP_NUM_5709))
1220                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1221                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1222 }
1223
/* Make sure 2.5G is advertised in the UP1 register of a 2.5G-capable
 * PHY (the 5709 requires selecting the OVER1G block first).  Returns
 * 1 if 2.5G was already advertised, 0 if it had to be enabled here or
 * the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* UP1 lives in the OVER1G block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1252
/* Clear the 2.5G advertisement in the UP1 register of a 2.5G-capable
 * PHY (selecting the OVER1G block on the 5709).  Returns 1 if the
 * advertisement was set and had to be cleared, 0 otherwise.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* UP1 lives in the OVER1G block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1278
1279 static void
1280 bnx2_enable_forced_2g5(struct bnx2 *bp)
1281 {
1282         u32 bmcr;
1283
1284         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1285                 return;
1286
1287         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1288                 u32 val;
1289
1290                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1291                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1292                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1293                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1294                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1295                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1296
1297                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1298                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1299                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1300
1301         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1302                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1303                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1304         }
1305
1306         if (bp->autoneg & AUTONEG_SPEED) {
1307                 bmcr &= ~BMCR_ANENABLE;
1308                 if (bp->req_duplex == DUPLEX_FULL)
1309                         bmcr |= BMCR_FULLDPLX;
1310         }
1311         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1312 }
1313
1314 static void
1315 bnx2_disable_forced_2g5(struct bnx2 *bp)
1316 {
1317         u32 bmcr;
1318
1319         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1320                 return;
1321
1322         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1323                 u32 val;
1324
1325                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1326                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1327                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1328                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1329                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1330
1331                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1332                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1333                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1334
1335         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1336                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1337                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1338         }
1339
1340         if (bp->autoneg & AUTONEG_SPEED)
1341                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1342         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1343 }
1344
/* Force the 5706 SerDes link down or restore it, via the SERDES_CTL
 * expansion register.  Per the caller in bnx2_set_link(), start == 0
 * restores normal operation; the 0xff0f / 0xc0 masks are vendor
 * magic — NOTE(review): confirm bit meanings against Broadcom docs.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1357
/* Poll the PHY and bring the software link state up to date: read the
 * (latched) BMSR, derive speed/duplex/flow-control on link up, restart
 * autoneg on link loss where appropriate, report any state change and
 * reprogram the MAC.  Always returns 0.  NOTE(review): appears to run
 * under bp->phy_lock like its sibling setup routines — confirm at the
 * call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware-managed PHY: link changes arrive via fw events. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: link-down is latched until read. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link from the EMAC status and
	 * the AN debug shadow register instead of trusting BMSR.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: drop any forced 2.5G and return from
		 * parallel detect to normal autoneg.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1441
1442 static int
1443 bnx2_reset_phy(struct bnx2 *bp)
1444 {
1445         int i;
1446         u32 reg;
1447
1448         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1449
1450 #define PHY_RESET_MAX_WAIT 100
1451         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1452                 udelay(10);
1453
1454                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1455                 if (!(reg & BMCR_RESET)) {
1456                         udelay(20);
1457                         break;
1458                 }
1459         }
1460         if (i == PHY_RESET_MAX_WAIT) {
1461                 return -EBUSY;
1462         }
1463         return 0;
1464 }
1465
1466 static u32
1467 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1468 {
1469         u32 adv = 0;
1470
1471         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1472                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1473
1474                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1475                         adv = ADVERTISE_1000XPAUSE;
1476                 }
1477                 else {
1478                         adv = ADVERTISE_PAUSE_CAP;
1479                 }
1480         }
1481         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1482                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1483                         adv = ADVERTISE_1000XPSE_ASYM;
1484                 }
1485                 else {
1486                         adv = ADVERTISE_PAUSE_ASYM;
1487                 }
1488         }
1489         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1490                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1491                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1492                 }
1493                 else {
1494                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1495                 }
1496         }
1497         return adv;
1498 }
1499
1500 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1501
/* Translate the requested link settings (autoneg advertisement or a
 * forced speed/duplex, plus pause) into a firmware netlink argument
 * word and hand it to the bootcode, which owns the remote PHY.
 * Drops and reacquires bp->phy_lock around the firmware handshake,
 * as the sparse annotations indicate.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex value. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware handshake may sleep; drop the PHY lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1560
1561 static int
1562 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1563 __releases(&bp->phy_lock)
1564 __acquires(&bp->phy_lock)
1565 {
1566         u32 adv, bmcr;
1567         u32 new_adv = 0;
1568
1569         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1570                 return (bnx2_setup_remote_phy(bp, port));
1571
1572         if (!(bp->autoneg & AUTONEG_SPEED)) {
1573                 u32 new_bmcr;
1574                 int force_link_down = 0;
1575
1576                 if (bp->req_line_speed == SPEED_2500) {
1577                         if (!bnx2_test_and_enable_2g5(bp))
1578                                 force_link_down = 1;
1579                 } else if (bp->req_line_speed == SPEED_1000) {
1580                         if (bnx2_test_and_disable_2g5(bp))
1581                                 force_link_down = 1;
1582                 }
1583                 bnx2_read_phy(bp, bp->mii_adv, &adv);
1584                 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1585
1586                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1587                 new_bmcr = bmcr & ~BMCR_ANENABLE;
1588                 new_bmcr |= BMCR_SPEED1000;
1589
1590                 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1591                         if (bp->req_line_speed == SPEED_2500)
1592                                 bnx2_enable_forced_2g5(bp);
1593                         else if (bp->req_line_speed == SPEED_1000) {
1594                                 bnx2_disable_forced_2g5(bp);
1595                                 new_bmcr &= ~0x2000;
1596                         }
1597
1598                 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1599                         if (bp->req_line_speed == SPEED_2500)
1600                                 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1601                         else
1602                                 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1603                 }
1604
1605                 if (bp->req_duplex == DUPLEX_FULL) {
1606                         adv |= ADVERTISE_1000XFULL;
1607                         new_bmcr |= BMCR_FULLDPLX;
1608                 }
1609                 else {
1610                         adv |= ADVERTISE_1000XHALF;
1611                         new_bmcr &= ~BMCR_FULLDPLX;
1612                 }
1613                 if ((new_bmcr != bmcr) || (force_link_down)) {
1614                         /* Force a link down visible on the other side */
1615                         if (bp->link_up) {
1616                                 bnx2_write_phy(bp, bp->mii_adv, adv &
1617                                                ~(ADVERTISE_1000XFULL |
1618                                                  ADVERTISE_1000XHALF));
1619                                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1620                                         BMCR_ANRESTART | BMCR_ANENABLE);
1621
1622                                 bp->link_up = 0;
1623                                 netif_carrier_off(bp->dev);
1624                                 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1625                                 bnx2_report_link(bp);
1626                         }
1627                         bnx2_write_phy(bp, bp->mii_adv, adv);
1628                         bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1629                 } else {
1630                         bnx2_resolve_flow_ctrl(bp);
1631                         bnx2_set_mac_link(bp);
1632                 }
1633                 return 0;
1634         }
1635
1636         bnx2_test_and_enable_2g5(bp);
1637
1638         if (bp->advertising & ADVERTISED_1000baseT_Full)
1639                 new_adv |= ADVERTISE_1000XFULL;
1640
1641         new_adv |= bnx2_phy_get_pause_adv(bp);
1642
1643         bnx2_read_phy(bp, bp->mii_adv, &adv);
1644         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1645
1646         bp->serdes_an_pending = 0;
1647         if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1648                 /* Force a link down visible on the other side */
1649                 if (bp->link_up) {
1650                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1651                         spin_unlock_bh(&bp->phy_lock);
1652                         msleep(20);
1653                         spin_lock_bh(&bp->phy_lock);
1654                 }
1655
1656                 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1657                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1658                         BMCR_ANENABLE);
1659                 /* Speed up link-up time when the link partner
1660                  * does not autonegotiate which is very common
1661                  * in blade servers. Some blade servers use
1662                  * IPMI for keyboard input and it's important
1663                  * to minimize link disruptions. Autoneg. involves
1664                  * exchanging base pages plus 3 next pages and
1665                  * normally completes in about 120 msec.
1666                  */
1667                 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1668                 bp->serdes_an_pending = 1;
1669                 mod_timer(&bp->timer, jiffies + bp->current_interval);
1670         } else {
1671                 bnx2_resolve_flow_ctrl(bp);
1672                 bnx2_set_mac_link(bp);
1673         }
1674
1675         return 0;
1676 }
1677
/* ethtool advertisement mask for fibre ports: include 2.5G only when
 * the PHY is 2.5G-capable (evaluates bp from the enclosing scope).
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* ethtool advertisement mask for copper ports: 10/100 half and full
 * duplex plus 1000 full duplex.
 */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement register bits for every 10/100 mode plus the
 * mandatory CSMA selector bit.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for both 1000 Mbps duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1692
/* Derive the driver's default link settings from the link word the
 * management firmware left in shared memory (remote-PHY setups).
 * The word either enables autonegotiation with a subset of speeds to
 * advertise, or forces a single speed/duplex.  Updates bp->autoneg,
 * bp->advertising, bp->req_line_speed and bp->req_duplex.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
        u32 link;

        /* The firmware keeps separate defaults per port type. */
        if (bp->phy_port == PORT_TP)
                link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
        else
                link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

        if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
                /* Autoneg: translate each firmware speed bit into the
                 * corresponding ethtool ADVERTISED_* bit.
                 */
                bp->req_line_speed = 0;
                bp->autoneg |= AUTONEG_SPEED;
                bp->advertising = ADVERTISED_Autoneg;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                        bp->advertising |= ADVERTISED_10baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
                        bp->advertising |= ADVERTISED_10baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                        bp->advertising |= ADVERTISED_100baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
                        bp->advertising |= ADVERTISED_100baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->advertising |= ADVERTISED_1000baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->advertising |= ADVERTISED_2500baseX_Full;
        } else {
                /* Forced mode: later checks override earlier ones, so
                 * the highest speed bit present wins.
                 */
                bp->autoneg = 0;
                bp->advertising = 0;
                bp->req_duplex = DUPLEX_FULL;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
                        bp->req_line_speed = SPEED_10;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
                        bp->req_line_speed = SPEED_100;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->req_line_speed = SPEED_1000;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->req_line_speed = SPEED_2500;
        }
}
1739
1740 static void
1741 bnx2_set_default_link(struct bnx2 *bp)
1742 {
1743         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1744                 bnx2_set_default_remote_link(bp);
1745                 return;
1746         }
1747
1748         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1749         bp->req_line_speed = 0;
1750         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1751                 u32 reg;
1752
1753                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1754
1755                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1756                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1757                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1758                         bp->autoneg = 0;
1759                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1760                         bp->req_duplex = DUPLEX_FULL;
1761                 }
1762         } else
1763                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1764 }
1765
/* Answer the firmware's heartbeat: bump the driver pulse sequence
 * counter and write it to the shared-memory pulse mailbox.  The
 * register window address/data pair is written under indirect_lock so
 * the two-step access stays atomic with respect to other indirect
 * register users.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
        u32 msg;
        u32 addr;

        spin_lock(&bp->indirect_lock);
        msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
        addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
        spin_unlock(&bp->indirect_lock);
}
1779
/* Process a link event reported by the remote PHY through the
 * management firmware.  Reads the BNX2_LINK_STATUS word from shared
 * memory, answers a pending heartbeat request, then decodes link
 * state, speed, duplex, flow control and port type from the word.
 * Reports the link if its state changed and reprograms the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* previous state, for change detect */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each xxHALF case sets half duplex and then falls
                 * through to the shared speed assignment below it.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fallthrough */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fallthrough */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fallthrough */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fallthrough */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                /* Flow control: when both speed and flow-control autoneg
                 * are enabled, take the negotiated result from the
                 * firmware word; otherwise apply the requested setting
                 * (full duplex only).
                 */
                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* A port-type change (TP <-> fibre) invalidates the
                 * default link settings; rebuild them.
                 */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1856
1857 static int
1858 bnx2_set_remote_link(struct bnx2 *bp)
1859 {
1860         u32 evt_code;
1861
1862         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1863         switch (evt_code) {
1864                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1865                         bnx2_remote_phy_event(bp);
1866                         break;
1867                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1868                 default:
1869                         bnx2_send_heart_beat(bp);
1870                         break;
1871         }
1872         return 0;
1873 }
1874
/* Program a copper PHY according to the requested link parameters.
 * In autoneg mode, rewrites the advertisement registers and restarts
 * autonegotiation only if something actually changed.  In forced
 * mode, builds a new BMCR value and, when it differs, forces the link
 * down briefly (dropping phy_lock across the sleep) so the partner
 * notices the change.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Mask current advertisements down to the bits we
                 * manage so the comparison below is meaningful.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Build the desired advertisement from bp->advertising. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Restart autoneg only when the advertisement changed
                 * or autoneg was disabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR latches link-down; read twice for current state. */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1973
1974 static int
1975 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1976 __releases(&bp->phy_lock)
1977 __acquires(&bp->phy_lock)
1978 {
1979         if (bp->loopback == MAC_LOOPBACK)
1980                 return 0;
1981
1982         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1983                 return (bnx2_setup_serdes_phy(bp, port));
1984         }
1985         else {
1986                 return (bnx2_setup_copper_phy(bp));
1987         }
1988 }
1989
/* One-time init of the 5709 SerDes PHY.  The 5709S exposes the IEEE
 * MII registers at an offset of 0x10, so the bp->mii_* register
 * numbers are remapped first; the rest of the function programs the
 * PHY through its block-addressed register windows (select a block
 * via MII_BNX2_BLK_ADDR, then access registers within it).
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        /* IEEE registers live at +0x10 on this PHY. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Point the AER block at the autoneg MMD. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Force fiber mode instead of autodetect. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Advertise 2.5G in the over-1G UP1 register when capable. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable BAM and teton-2 next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address pointing at the IEEE combo block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2039
/* One-time init of the 5708 SerDes PHY: select fiber mode, enable
 * PLL early detect, advertise 2.5G when capable, and apply board/
 * revision specific TX amplitude tweaks from shared hardware config.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Advertise 2.5G capability via the UP1 register. */
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Board-specific TX control value from NVRAM hardware config;
         * only applied on backplane designs.
         */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2097
/* One-time init of the 5706 SerDes PHY.  Registers 0x18/0x1c are
 * vendor-specific auxiliary/shadow control registers; the magic values
 * written here enable or disable extended (jumbo) packet length
 * depending on the configured MTU — NOTE(review): exact bit meanings
 * are per Broadcom PHY documentation, not visible here.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2135
/* One-time init of a copper PHY: apply the CRC-fix workaround and
 * early-DAC disable when the corresponding quirk flags are set,
 * program extended packet length according to MTU, and enable
 * ethernet@wirespeed.  The 0x15/0x17/0x18 accesses go through
 * vendor-specific shadow/expansion registers — NOTE(review): values
 * are per Broadcom PHY documentation, not visible here.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* CRC errata workaround sequence. */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2187
2188
/* Initialize the PHY: set default MII register numbers, enable link
 * attention, read the PHY ID, run the chip-specific init routine, and
 * finally program the requested link settings via bnx2_setup_phy().
 * Remote-PHY devices skip local PHY access entirely.  Returns 0 or a
 * negative error from the init/setup helpers.  May temporarily drop
 * bp->phy_lock inside bnx2_setup_phy().
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 val;
        int rc = 0;

        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

        /* Standard MII register numbers; chip-specific init below may
         * remap these (e.g. 5709S uses +0x10 offsets).
         */
        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* Firmware owns the PHY in remote-PHY mode. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                goto setup_phy;

        /* Assemble the 32-bit PHY ID from the two ID registers. */
        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp, reset_phy);
        }
        else {
                rc = bnx2_init_copper_phy(bp, reset_phy);
        }

setup_phy:
        if (!rc)
                rc = bnx2_setup_phy(bp, bp->phy_port);

        return rc;
}
2234
2235 static int
2236 bnx2_set_mac_loopback(struct bnx2 *bp)
2237 {
2238         u32 mac_mode;
2239
2240         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2241         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2242         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2243         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2244         bp->link_up = 1;
2245         return 0;
2246 }
2247
2248 static int bnx2_test_link(struct bnx2 *);
2249
/* Put the PHY into loopback at forced 1G full duplex, wait up to one
 * second for the (loopback) link to come up, then configure the EMAC
 * mode register for GMII with all loopback/force bits cleared.
 * Returns 0 on success or the bnx2_write_phy() error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        /* PHY access requires phy_lock. */
        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        /* Poll for link-up, 10 x 100ms max. */
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2279
/* Synchronous driver->firmware message handshake.  Tags msg_data with
 * the next sequence number and writes it to the driver mailbox.  When
 * @ack is set, polls the firmware mailbox for a matching ack sequence
 * up to BNX2_FW_ACK_TIME_OUT_MS; on timeout the firmware is told via
 * a FW_TIMEOUT message and -EBUSY is returned.  Returns -EIO when the
 * firmware acked with a non-OK status, 0 otherwise.  @silent
 * suppresses the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require a status check. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2325
/* Initialize the 5709 context memory: start the controller's memory
 * init, then program the host page table with the DMA address of each
 * pre-allocated context block, polling for each write request to
 * complete.  Returns 0, -EBUSY when the hardware does not respond in
 * time, or -ENOMEM when a context block is missing.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;       /* encode host page size */
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Wait (up to 10 x 2us) for MEM_INIT to self-clear. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Program page table entry i: low 32 bits + valid flag,
                 * then the high 32 bits, then trigger the write.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Poll for the write request bit to clear. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2373
/* Zero the on-chip context memory for all 96 connection IDs on
 * non-5709 chips.  5706 A0 parts need a virtual->physical CID remap
 * (every CID with bit 3 set maps into the 0x60+ range) before the
 * context pages can be addressed.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        /* A0 errata: remap CIDs with bit 3 set. */
                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* One CID spans several physical context pages. */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2416
/* Workaround for bad on-chip RX buffer memory: drain the free mbuf
 * pool by allocating every buffer, remember the good ones (bit 9 of
 * the returned value marks a bad memory block), then free only the
 * good ones back — permanently retiring the bad buffers from the
 * pool.  Returns 0 on success, -ENOMEM if the temporary array cannot
 * be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                                BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                /* NOTE(review): free-command encoding per hardware spec;
                 * combines the buffer value with itself shifted plus the
                 * command bit.
                 */
                val = (val << 9) | val | 1;

                bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
2468
2469 static void
2470 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2471 {
2472         u32 val;
2473
2474         val = (mac_addr[0] << 8) | mac_addr[1];
2475
2476         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2477
2478         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2479                 (mac_addr[4] << 8) | mac_addr[5];
2480
2481         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2482 }
2483
2484 static inline int
2485 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2486 {
2487         dma_addr_t mapping;
2488         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2489         struct rx_bd *rxbd =
2490                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2491         struct page *page = alloc_page(GFP_ATOMIC);
2492
2493         if (!page)
2494                 return -ENOMEM;
2495         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2496                                PCI_DMA_FROMDEVICE);
2497         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2498                 __free_page(page);
2499                 return -EIO;
2500         }
2501
2502         rx_pg->page = page;
2503         pci_unmap_addr_set(rx_pg, mapping, mapping);
2504         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2505         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2506         return 0;
2507 }
2508
2509 static void
2510 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2511 {
2512         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2513         struct page *page = rx_pg->page;
2514
2515         if (!page)
2516                 return;
2517
2518         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2519                        PCI_DMA_FROMDEVICE);
2520
2521         __free_page(page);
2522         rx_pg->page = NULL;
2523 }
2524
2525 static inline int
2526 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2527 {
2528         struct sk_buff *skb;
2529         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2530         dma_addr_t mapping;
2531         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2532         unsigned long align;
2533
2534         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2535         if (skb == NULL) {
2536                 return -ENOMEM;
2537         }
2538
2539         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2540                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2541
2542         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2543                 PCI_DMA_FROMDEVICE);
2544         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2545                 dev_kfree_skb(skb);
2546                 return -EIO;
2547         }
2548
2549         rx_buf->skb = skb;
2550         pci_unmap_addr_set(rx_buf, mapping, mapping);
2551
2552         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2553         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2554
2555         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2556
2557         return 0;
2558 }
2559
2560 static int
2561 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2562 {
2563         struct status_block *sblk = bnapi->status_blk.msi;
2564         u32 new_link_state, old_link_state;
2565         int is_set = 1;
2566
2567         new_link_state = sblk->status_attn_bits & event;
2568         old_link_state = sblk->status_attn_bits_ack & event;
2569         if (new_link_state != old_link_state) {
2570                 if (new_link_state)
2571                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2572                 else
2573                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2574         } else
2575                 is_set = 0;
2576
2577         return is_set;
2578 }
2579
2580 static void
2581 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2582 {
2583         spin_lock(&bp->phy_lock);
2584
2585         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2586                 bnx2_set_link(bp);
2587         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2588                 bnx2_set_remote_link(bp);
2589
2590         spin_unlock(&bp->phy_lock);
2591
2592 }
2593
2594 static inline u16
2595 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2596 {
2597         u16 cons;
2598
2599         /* Tell compiler that status block fields can change. */
2600         barrier();
2601         cons = *bnapi->hw_tx_cons_ptr;
2602         barrier();
2603         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2604                 cons++;
2605         return cons;
2606 }
2607
/* Reclaim completed tx descriptors for the ring belonging to @bnapi.
 * Unmaps and frees up to @budget transmitted skbs (a budget of 0 means
 * no limit: tx_pkt is incremented before the equality check, so it can
 * never equal 0), updates the software consumer index, and wakes the
 * tx queue if it was stopped and enough descriptors are now free.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance maps 1:1 to a tx queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets:
		 * only reclaim the packet once hw_cons has passed its
		 * last descriptor (s16 arithmetic handles wraparound).
		 */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				/* account for the skipped end-of-page index */
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Skip past the frag descriptors plus the head descriptor. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons in case more completions arrived. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid racing with
		 * bnx2_start_xmit() stopping the queue. */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2690
/* Recycle @count page-ring entries from the consumer back to the
 * producer side of the rx page ring (used on error/alloc-failure paths
 * so no page is lost).  If @skb is non-NULL, its last page frag is
 * detached, returned to the current consumer slot, and the skb freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move page, unmap address and descriptor address from
		 * the consumer slot to the producer slot (no-op when
		 * they coincide). */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2746
/* Give the rx buffer at consumer index @cons back to the hardware by
 * re-queuing it at producer index @prod: re-sync the header area for
 * device access, move the skb, unmap address, and descriptor address
 * from the cons slot to the prod slot, and advance rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Only the header area was synced for the CPU in bnx2_rx_int();
	 * hand that region back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the descriptor already holds the right address. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2776
/* Finish building a received skb.  @len is the packet length, @hdr_len
 * is non-zero when the frame is split between the skb's linear buffer
 * and the page ring, @ring_idx packs (cons << 16) | prod.  Replaces the
 * consumed rx buffer first; on allocation failure everything is
 * recycled and an error returned.  The "+ 4" / "- 4" adjustments
 * account for a 4-byte trailer (presumably the frame CRC — see the
 * matching len -= 4 in bnx2_rx_int; TODO confirm against chip docs).
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the slot we are consuming; if that fails, recycle the
	 * old skb (and any page frags) and bail out. */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		/* Attach each page from the page ring as an skb frag,
		 * refilling the ring as we go. */
		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* Last fragment holds only (part of) the 4-byte
			 * trailer: trim it off instead of attaching a page. */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Roll back: recycle remaining pages and
				 * the frag just attached to the skb. */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2875
2876 static inline u16
2877 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2878 {
2879         u16 cons;
2880
2881         /* Tell compiler that status block fields can change. */
2882         barrier();
2883         cons = *bnapi->hw_rx_cons_ptr;
2884         barrier();
2885         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2886                 cons++;
2887         return cons;
2888 }
2889
/* NAPI rx processing: consume up to @budget received packets from the
 * ring belonging to @bnapi, hand them to the stack, and refill the
 * ring(s).  Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header region for CPU access; the
		 * rest is handled when the skb is unmapped or reused. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr status structure ahead of
		 * the frame data in the buffer. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Non-zero hdr_len means the frame continues on the
		 * page ring (split header or jumbo frame). */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling their buffers. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte trailer (frame CRC, presumably). */
		len -= 4;

		/* Small packets: copy into a fresh skb and recycle the
		 * original buffer, avoiding an unmap/remap cycle. */
		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		/* VLAN handling: use hw accel if a vlan group is
		 * registered, otherwise re-insert the tag in-line. */
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are 802.1Q tagged
		 * (0x8100 == ETH_P_8021Q). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only if no xsum errors
		 * were flagged. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer indices and byte count. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3065
3066 /* MSI ISR - The only difference between this and the INTx ISR
3067  * is that the MSI interrupt is always serviced.
3068  */
3069 static irqreturn_t
3070 bnx2_msi(int irq, void *dev_instance)
3071 {
3072         struct bnx2_napi *bnapi = dev_instance;
3073         struct bnx2 *bp = bnapi->bp;
3074
3075         prefetch(bnapi->status_blk.msi);
3076         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3077                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3078                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3079
3080         /* Return here if interrupt is disabled. */
3081         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3082                 return IRQ_HANDLED;
3083
3084         napi_schedule(&bnapi->napi);
3085
3086         return IRQ_HANDLED;
3087 }
3088
3089 static irqreturn_t
3090 bnx2_msi_1shot(int irq, void *dev_instance)
3091 {
3092         struct bnx2_napi *bnapi = dev_instance;
3093         struct bnx2 *bp = bnapi->bp;
3094
3095         prefetch(bnapi->status_blk.msi);
3096
3097         /* Return here if interrupt is disabled. */
3098         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3099                 return IRQ_HANDLED;
3100
3101         napi_schedule(&bnapi->napi);
3102
3103         return IRQ_HANDLED;
3104 }
3105
/* INTx interrupt handler.  May be called for a shared interrupt that is
 * not ours: returns IRQ_NONE when the status index has not advanced and
 * the chip reports INTA deasserted.  Otherwise masks the interrupt and
 * schedules NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only when we actually win the race
	 * to schedule the poll. */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3144
3145 static inline int
3146 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3147 {
3148         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3149         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3150
3151         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3152             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3153                 return 1;
3154         return 0;
3155 }
3156
3157 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3158                                  STATUS_ATTN_BITS_TIMER_ABORT)
3159
3160 static inline int
3161 bnx2_has_work(struct bnx2_napi *bnapi)
3162 {
3163         struct status_block *sblk = bnapi->status_blk.msi;
3164
3165         if (bnx2_has_fast_work(bnapi))
3166                 return 1;
3167
3168         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3169             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3170                 return 1;
3171
3172         return 0;
3173 }
3174
/* Workaround for a missed MSI: if work is pending but the status index
 * has not advanced since the previous idle check, assume the MSI was
 * lost — toggle MSI enable off/on and invoke the handler by hand.
 * Called periodically (the caller is outside this view — presumably the
 * driver timer; confirm against the rest of the file).
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not currently enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Pulse the MSI enable bit, then run the
			 * handler directly to process the stuck work. */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3196
3197 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3198 {
3199         struct status_block *sblk = bnapi->status_blk.msi;
3200         u32 status_attn_bits = sblk->status_attn_bits;
3201         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3202
3203         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3204             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3205
3206                 bnx2_phy_int(bp, bnapi);
3207
3208                 /* This is needed to take care of transient status
3209                  * during link changes.
3210                  */
3211                 REG_WR(bp, BNX2_HC_COMMAND,
3212                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3213                 REG_RD(bp, BNX2_HC_COMMAND);
3214         }
3215 }
3216
3217 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3218                           int work_done, int budget)
3219 {
3220         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3221         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3222
3223         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3224                 bnx2_tx_int(bp, bnapi, 0);
3225
3226         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3227                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3228
3229         return work_done;
3230 }
3231
/* NAPI poll callback for MSI-X vectors.  Loops doing tx/rx work until
 * the budget is exhausted or no fast-path work remains, then completes
 * NAPI and re-enables this vector's interrupt by acking the last seen
 * status index.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack the status index; this re-arms the vector. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3258
/* NAPI poll callback for the INTx/MSI (non-MSI-X) case.  Also polls
 * link attention events.  When all work is done, completes NAPI and
 * re-enables interrupts; the INTx path issues two INT_ACK writes —
 * first with MASK_INT still set, then without it (the final write is
 * what re-enables the interrupt; NOTE(review): presumably a hardware
 * requirement for INTx — confirm against chip errata).
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with interrupts still masked ... */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			/* ... then ack again without the mask bit. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3303
/* Program the chip's receive filters (promiscuous, multicast hash,
 * unicast perfect-match) to reflect dev->flags and the address lists.
 *
 * Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock also serializes writers of the RX mode registers. */
	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep (don't strip) VLAN tags only while no vlan group is
	 * registered; otherwise the hw strips them for acceleration.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast by saturating the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 256 filter bits:
			 * crc[7:5] picks the register, crc[4:0] the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More unicast addresses than perfect-match slots: fall back to
	 * promiscuous mode.
	 */
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		list_for_each_entry(ha, &dev->uc_list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, load the new rules, then re-enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3399
3400 static int __devinit
3401 check_fw_section(const struct firmware *fw,
3402                  const struct bnx2_fw_file_section *section,
3403                  u32 alignment, bool non_empty)
3404 {
3405         u32 offset = be32_to_cpu(section->offset);
3406         u32 len = be32_to_cpu(section->len);
3407
3408         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3409                 return -EINVAL;
3410         if ((non_empty && len == 0) || len > fw->size - offset ||
3411             len & (alignment - 1))
3412                 return -EINVAL;
3413         return 0;
3414 }
3415
3416 static int __devinit
3417 check_mips_fw_entry(const struct firmware *fw,
3418                     const struct bnx2_mips_fw_file_entry *entry)
3419 {
3420         if (check_fw_section(fw, &entry->text, 4, true) ||
3421             check_fw_section(fw, &entry->data, 4, false) ||
3422             check_fw_section(fw, &entry->rodata, 4, false))
3423                 return -EINVAL;
3424         return 0;
3425 }
3426
3427 static int __devinit
3428 bnx2_request_firmware(struct bnx2 *bp)
3429 {
3430         const char *mips_fw_file, *rv2p_fw_file;
3431         const struct bnx2_mips_fw_file *mips_fw;
3432         const struct bnx2_rv2p_fw_file *rv2p_fw;
3433         int rc;
3434
3435         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3436                 mips_fw_file = FW_MIPS_FILE_09;
3437                 rv2p_fw_file = FW_RV2P_FILE_09;
3438         } else {
3439                 mips_fw_file = FW_MIPS_FILE_06;
3440                 rv2p_fw_file = FW_RV2P_FILE_06;
3441         }
3442
3443         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3444         if (rc) {
3445                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3446                        mips_fw_file);
3447                 return rc;
3448         }
3449
3450         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3451         if (rc) {
3452                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3453                        rv2p_fw_file);
3454                 return rc;
3455         }
3456         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3457         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3458         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3459             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3460             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3461             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3462             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3463             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3464                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3465                        mips_fw_file);
3466                 return -EINVAL;
3467         }
3468         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3469             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3470             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3471                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3472                        rv2p_fw_file);
3473                 return -EINVAL;
3474         }
3475
3476         return 0;
3477 }
3478
3479 static u32
3480 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3481 {
3482         switch (idx) {
3483         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3484                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3485                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3486                 break;
3487         }
3488         return rv2p_code;
3489 }
3490
/* Download one RV2P processor image and apply its fixup table, then
 * reset the processor (it is un-stalled later during chip init).
 * Instructions are 64 bits wide, written as INSTR_HIGH/INSTR_LOW
 * register pairs followed by an address/command write.  Always
 * returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own write command and address register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the whole image, one 8-byte instruction at a time. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the instructions listed in the fixup table after
	 * patching them via rv2p_fw_fixup() (e.g. BD page size).  Each
	 * fixup entry is a 32-bit word index into the image; loc is the
	 * LOW half, loc - 1 the HIGH half, and loc / 2 the instruction
	 * address.  A zero entry means the slot is unused.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3550
3551 static int
3552 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3553             const struct bnx2_mips_fw_file_entry *fw_entry)
3554 {
3555         u32 addr, len, file_offset;
3556         __be32 *data;
3557         u32 offset;
3558         u32 val;
3559
3560         /* Halt the CPU. */
3561         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3562         val |= cpu_reg->mode_value_halt;
3563         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3564         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3565
3566         /* Load the Text area. */
3567         addr = be32_to_cpu(fw_entry->text.addr);
3568         len = be32_to_cpu(fw_entry->text.len);
3569         file_offset = be32_to_cpu(fw_entry->text.offset);
3570         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3571
3572         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3573         if (len) {
3574                 int j;
3575
3576                 for (j = 0; j < (len / 4); j++, offset += 4)
3577                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3578         }
3579
3580         /* Load the Data area. */
3581         addr = be32_to_cpu(fw_entry->data.addr);
3582         len = be32_to_cpu(fw_entry->data.len);
3583         file_offset = be32_to_cpu(fw_entry->data.offset);
3584         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3585
3586         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3587         if (len) {
3588                 int j;
3589
3590                 for (j = 0; j < (len / 4); j++, offset += 4)
3591                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3592         }
3593
3594         /* Load the Read-Only area. */
3595         addr = be32_to_cpu(fw_entry->rodata.addr);
3596         len = be32_to_cpu(fw_entry->rodata.len);
3597         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3598         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3599
3600         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3601         if (len) {
3602                 int j;
3603
3604                 for (j = 0; j < (len / 4); j++, offset += 4)
3605                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3606         }
3607
3608         /* Clear the pre-fetch instruction. */
3609         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3610
3611         val = be32_to_cpu(fw_entry->start_addr);
3612         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3613
3614         /* Start the CPU. */
3615         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3616         val &= ~cpu_reg->mode_value_halt;
3617         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3618         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3619
3620         return 0;
3621 }
3622
3623 static int
3624 bnx2_init_cpus(struct bnx2 *bp)
3625 {
3626         const struct bnx2_mips_fw_file *mips_fw =
3627                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3628         const struct bnx2_rv2p_fw_file *rv2p_fw =
3629                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3630         int rc;
3631
3632         /* Initialize the RV2P processor. */
3633         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3634         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3635
3636         /* Initialize the RX Processor. */
3637         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3638         if (rc)
3639                 goto init_cpu_err;
3640
3641         /* Initialize the TX Processor. */
3642         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3643         if (rc)
3644                 goto init_cpu_err;
3645
3646         /* Initialize the TX Patch-up Processor. */
3647         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3648         if (rc)
3649                 goto init_cpu_err;
3650
3651         /* Initialize the Completion Processor. */
3652         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3653         if (rc)
3654                 goto init_cpu_err;
3655
3656         /* Initialize the Command Processor. */
3657         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3658
3659 init_cpu_err:
3660         return rc;
3661 }
3662
/* Transition the chip between D0 and D3hot, programming Wake-on-LAN
 * filters when entering D3hot with WOL enabled.  Returns 0 on
 * success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear wake-up packet state left over from D3hot and
		 * disable magic-packet mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper for
			 * the low-power WOL link, restoring the user's
			 * settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule: accept broadcast and multicast only
			 * while asleep (disable, load, then enable).
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1 set D3hot only when WOL is enabled;
			 * NOTE(review): presumably a chip erratum (the part
			 * stays in D0 otherwise) -- confirm with errata.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3800
3801 static int
3802 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3803 {
3804         u32 val;
3805         int j;
3806
3807         /* Request access to the flash interface. */
3808         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3809         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3810                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3811                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3812                         break;
3813
3814                 udelay(5);
3815         }
3816
3817         if (j >= NVRAM_TIMEOUT_COUNT)
3818                 return -EBUSY;
3819
3820         return 0;
3821 }
3822
3823 static int
3824 bnx2_release_nvram_lock(struct bnx2 *bp)
3825 {
3826         int j;
3827         u32 val;
3828
3829         /* Relinquish nvram interface. */
3830         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3831
3832         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3833                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3834                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3835                         break;
3836
3837                 udelay(5);
3838         }
3839
3840         if (j >= NVRAM_TIMEOUT_COUNT)
3841                 return -EBUSY;
3842
3843         return 0;
3844 }
3845
3846
3847 static int
3848 bnx2_enable_nvram_write(struct bnx2 *bp)
3849 {
3850         u32 val;
3851
3852         val = REG_RD(bp, BNX2_MISC_CFG);
3853         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3854
3855         if (bp->flash_info->flags & BNX2_NV_WREN) {
3856                 int j;
3857
3858                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3859                 REG_WR(bp, BNX2_NVM_COMMAND,
3860                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3861
3862                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3863                         udelay(5);
3864
3865                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3866                         if (val & BNX2_NVM_COMMAND_DONE)
3867                                 break;
3868                 }
3869
3870                 if (j >= NVRAM_TIMEOUT_COUNT)
3871                         return -EBUSY;
3872         }
3873         return 0;
3874 }
3875
3876 static void
3877 bnx2_disable_nvram_write(struct bnx2 *bp)
3878 {
3879         u32 val;
3880
3881         val = REG_RD(bp, BNX2_MISC_CFG);
3882         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3883 }
3884
3885
3886 static void
3887 bnx2_enable_nvram_access(struct bnx2 *bp)
3888 {
3889         u32 val;
3890
3891         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3892         /* Enable both bits, even on read. */
3893         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3894                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3895 }
3896
3897 static void
3898 bnx2_disable_nvram_access(struct bnx2 *bp)
3899 {
3900         u32 val;
3901
3902         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3903         /* Disable both bits, even after read. */
3904         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3905                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3906                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3907 }
3908
3909 static int
3910 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3911 {
3912         u32 cmd;
3913         int j;
3914
3915         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3916                 /* Buffered flash, no erase needed */
3917                 return 0;
3918
3919         /* Build an erase command */
3920         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3921               BNX2_NVM_COMMAND_DOIT;
3922
3923         /* Need to clear DONE bit separately. */
3924         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3925
3926         /* Address of the NVRAM to read from. */
3927         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3928
3929         /* Issue an erase command. */
3930         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3931
3932         /* Wait for completion. */
3933         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3934                 u32 val;
3935
3936                 udelay(5);
3937
3938                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3939                 if (val & BNX2_NVM_COMMAND_DONE)
3940                         break;
3941         }
3942
3943         if (j >= NVRAM_TIMEOUT_COUNT)
3944                 return -EBUSY;
3945
3946         return 0;
3947 }
3948
3949 static int
3950 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3951 {
3952         u32 cmd;
3953         int j;
3954
3955         /* Build the command word. */
3956         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3957
3958         /* Calculate an offset of a buffered flash, not needed for 5709. */
3959         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3960                 offset = ((offset / bp->flash_info->page_size) <<
3961                            bp->flash_info->page_bits) +
3962                           (offset % bp->flash_info->page_size);
3963         }
3964
3965         /* Need to clear DONE bit separately. */
3966         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3967
3968         /* Address of the NVRAM to read from. */
3969         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3970
3971         /* Issue a read command. */
3972         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3973
3974         /* Wait for completion. */
3975         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3976                 u32 val;
3977
3978                 udelay(5);
3979
3980                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3981                 if (val & BNX2_NVM_COMMAND_DONE) {
3982                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3983                         memcpy(ret_val, &v, 4);
3984                         break;
3985                 }
3986         }
3987         if (j >= NVRAM_TIMEOUT_COUNT)
3988                 return -EBUSY;
3989
3990         return 0;
3991 }
3992
3993
3994 static int
3995 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3996 {
3997         u32 cmd;
3998         __be32 val32;
3999         int j;
4000
4001         /* Build the command word. */
4002         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4003
4004         /* Calculate an offset of a buffered flash, not needed for 5709. */
4005         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4006                 offset = ((offset / bp->flash_info->page_size) <<
4007                           bp->flash_info->page_bits) +
4008                          (offset % bp->flash_info->page_size);
4009         }
4010
4011         /* Need to clear DONE bit separately. */
4012         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4013
4014         memcpy(&val32, val, 4);
4015
4016         /* Write the data. */
4017         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4018
4019         /* Address of the NVRAM to write to. */
4020         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4021
4022         /* Issue the write command. */
4023         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4024
4025         /* Wait for completion. */
4026         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4027                 udelay(5);
4028
4029                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4030                         break;
4031         }
4032         if (j >= NVRAM_TIMEOUT_COUNT)
4033                 return -EBUSY;
4034
4035         return 0;
4036 }
4037
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVRAM interface if the strapping
 * has not been applied yet.  Also determines bp->flash_size.
 * Returns 0 on success, -ENODEV for an unrecognized part, or a lock
 * acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a single fixed flash type; no strap decoding needed. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap set. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop falls through with j == entry_count when no table
	 * entry matched the strapping.
	 */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size the bootcode reports in shared hw config;
	 * fall back to the flash table's total_size when absent.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4120
4121 static int
4122 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4123                 int buf_size)
4124 {
4125         int rc = 0;
4126         u32 cmd_flags, offset32, len32, extra;
4127
4128         if (buf_size == 0)
4129                 return 0;
4130
4131         /* Request access to the flash interface. */
4132         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4133                 return rc;
4134
4135         /* Enable access to flash interface */
4136         bnx2_enable_nvram_access(bp);
4137
4138         len32 = buf_size;
4139         offset32 = offset;
4140         extra = 0;
4141
4142         cmd_flags = 0;
4143
4144         if (offset32 & 3) {
4145                 u8 buf[4];
4146                 u32 pre_len;
4147
4148                 offset32 &= ~3;
4149                 pre_len = 4 - (offset & 3);
4150
4151                 if (pre_len >= len32) {
4152                         pre_len = len32;
4153                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4154                                     BNX2_NVM_COMMAND_LAST;
4155                 }
4156                 else {
4157                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4158                 }
4159
4160                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4161
4162                 if (rc)
4163                         return rc;
4164
4165                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4166
4167                 offset32 += 4;
4168                 ret_buf += pre_len;
4169                 len32 -= pre_len;
4170         }
4171         if (len32 & 3) {
4172                 extra = 4 - (len32 & 3);
4173                 len32 = (len32 + 4) & ~3;
4174         }
4175
4176         if (len32 == 4) {
4177                 u8 buf[4];
4178
4179                 if (cmd_flags)
4180                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4181                 else
4182                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4183                                     BNX2_NVM_COMMAND_LAST;
4184
4185                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4186
4187                 memcpy(ret_buf, buf, 4 - extra);
4188         }
4189         else if (len32 > 0) {
4190                 u8 buf[4];
4191
4192                 /* Read the first word. */
4193                 if (cmd_flags)
4194                         cmd_flags = 0;
4195                 else
4196                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4197
4198                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4199
4200                 /* Advance to the next dword. */
4201                 offset32 += 4;
4202                 ret_buf += 4;
4203                 len32 -= 4;
4204
4205                 while (len32 > 4 && rc == 0) {
4206                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4207
4208                         /* Advance to the next dword. */
4209                         offset32 += 4;
4210                         ret_buf += 4;
4211                         len32 -= 4;
4212                 }
4213
4214                 if (rc)
4215                         return rc;
4216
4217                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4218                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4219
4220                 memcpy(ret_buf, buf, 4 - extra);
4221         }
4222
4223         /* Disable access to flash interface */
4224         bnx2_disable_nvram_access(bp);
4225
4226         bnx2_release_nvram_lock(bp);
4227
4228         return rc;
4229 }
4230
/* Write 'buf_size' bytes from 'data_buf' into NVRAM at byte 'offset'.
 * Unaligned leading/trailing bytes are preserved via read-modify-write
 * of the bordering dwords.  For non-buffered flash, every touched page
 * is read out in full, erased, and rewritten.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: capture the dword containing the first bytes. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: capture the dword containing the last bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build a dword-aligned shadow buffer with the preserved boundary
	 * bytes merged around the caller's data. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer holding one full flash page; 264 bytes
		 * presumably covers the largest page size in the flash
		 * table — TODO confirm against flash_table entries. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Walk the write range one flash page at a time. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		/* NOTE(review): the error gotos below jump to
		 * nvram_write_end while the NVRAM lock is still held and
		 * flash access is still enabled — confirm whether this
		 * leak is intentional / recovered elsewhere. */
		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unallocated buffers are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4410
4411 static void
4412 bnx2_init_fw_cap(struct bnx2 *bp)
4413 {
4414         u32 val, sig = 0;
4415
4416         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4417         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4418
4419         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4420                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4421
4422         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4423         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4424                 return;
4425
4426         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4427                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4428                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4429         }
4430
4431         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4432             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4433                 u32 link;
4434
4435                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4436
4437                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4438                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4439                         bp->phy_port = PORT_FIBRE;
4440                 else
4441                         bp->phy_port = PORT_TP;
4442
4443                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4444                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4445         }
4446
4447         if (netif_running(bp->dev) && sig)
4448                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4449 }
4450
/* Point the PCI GRC windows at the MSI-X vector table and PBA regions.
 * Presumably separate-window mode must be selected before programming
 * windows 2 and 3 — TODO confirm against the register spec. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4459
/* Soft-reset the chip after handshaking with the bootcode.
 * 'reset_code' is the BNX2_DRV_MSG_CODE_* reason reported to the
 * firmware.  Returns 0 on success or a negative errno. */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through the MISC command register and does
		 * not use the CORE_RST_REQ poll loop below. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; re-sync remote-PHY link settings
	 * if the port type changed across the reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The GRC windows programmed for MSI-X do not survive the reset. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4565
/* Bring the chip up after a reset: DMA configuration, context memory,
 * on-chip CPU firmware, MTU/rbuf sizing, host-coalescing parameters and
 * the receive filter.  Returns 0 on success or a negative errno. */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear relaxed ordering (ERO) in the PCI-X command word. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* NOTE(review): HALT_DIS on 5709 A0/A1 looks like an errata
	 * workaround — confirm against chip errata before changing. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* Size the rbuf thresholds for at least a standard frame. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing parameters: low 16 bits apply normally, the
	 * high 16 bits apply while an interrupt is pending. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status-block configuration for MSI-X vectors 1..n-1. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4786
4787 static void
4788 bnx2_clear_ring_states(struct bnx2 *bp)
4789 {
4790         struct bnx2_napi *bnapi;
4791         struct bnx2_tx_ring_info *txr;
4792         struct bnx2_rx_ring_info *rxr;
4793         int i;
4794
4795         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4796                 bnapi = &bp->bnx2_napi[i];
4797                 txr = &bnapi->tx_ring;
4798                 rxr = &bnapi->rx_ring;
4799
4800                 txr->tx_cons = 0;
4801                 txr->hw_tx_cons = 0;
4802                 rxr->rx_prod_bseq = 0;
4803                 rxr->rx_prod = 0;
4804                 rxr->rx_cons = 0;
4805                 rxr->rx_pg_prod = 0;
4806                 rxr->rx_pg_cons = 0;
4807         }
4808 }
4809
4810 static void
4811 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4812 {
4813         u32 val, offset0, offset1, offset2, offset3;
4814         u32 cid_addr = GET_CID_ADDR(cid);
4815
4816         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4817                 offset0 = BNX2_L2CTX_TYPE_XI;
4818                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4819                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4820                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4821         } else {
4822                 offset0 = BNX2_L2CTX_TYPE;
4823                 offset1 = BNX2_L2CTX_CMD_TYPE;
4824                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4825                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4826         }
4827         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4828         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4829
4830         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4831         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4832
4833         val = (u64) txr->tx_desc_mapping >> 32;
4834         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4835
4836         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4837         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4838 }
4839
4840 static void
4841 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4842 {
4843         struct tx_bd *txbd;
4844         u32 cid = TX_CID;
4845         struct bnx2_napi *bnapi;
4846         struct bnx2_tx_ring_info *txr;
4847
4848         bnapi = &bp->bnx2_napi[ring_num];
4849         txr = &bnapi->tx_ring;
4850
4851         if (ring_num == 0)
4852                 cid = TX_CID;
4853         else
4854                 cid = TX_TSS_CID + ring_num - 1;
4855
4856         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4857
4858         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4859
4860         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4861         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4862
4863         txr->tx_prod = 0;
4864         txr->tx_prod_bseq = 0;
4865
4866         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4867         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4868
4869         bnx2_init_tx_context(bp, cid, txr);
4870 }
4871
4872 static void
4873 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4874                      int num_rings)
4875 {
4876         int i;
4877         struct rx_bd *rxbd;
4878
4879         for (i = 0; i < num_rings; i++) {
4880                 int j;
4881
4882                 rxbd = &rx_ring[i][0];
4883                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4884                         rxbd->rx_bd_len = buf_size;
4885                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4886                 }
4887                 if (i == (num_rings - 1))
4888                         j = 0;
4889                 else
4890                         j = i + 1;
4891                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4892                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4893         }
4894 }
4895
/* Build the rx BD ring (and the optional rx page ring) for ring
 * 'ring_num', program its context, pre-fill it with buffers, and hand
 * the initial producer indices to the chip. */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx context; RSS rings get consecutive
	 * CIDs after RX_RSS_CID. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page ring disabled by default; re-enabled below if configured. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Point the context at the first rx BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early (partial fill) if an
	 * allocation fails. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the rx ring with skbs the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip mailboxes. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4975
/* (Re)initialize every tx and rx ring and, when more than one ring of
 * either kind is enabled, program the TSS configuration and the RSS
 * indirection table and hash types.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are being set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the rx rings are being set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table one byte per entry, flushing
		 * a big-endian word every 4 entries.  Entry values cycle
		 * 0..num_rx_rings-2 (presumably indexing the RSS rings;
		 * ring 0 carries non-RSS traffic -- NOTE(review): confirm).
		 * NOTE(review): the word is written at byte offset i where
		 * i % 4 == 3 -- verify the indirect-write alignment
		 * semantics against the RXP firmware interface.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5020
5021 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5022 {
5023         u32 max, num_rings = 1;
5024
5025         while (ring_size > MAX_RX_DESC_CNT) {
5026                 ring_size -= MAX_RX_DESC_CNT;
5027                 num_rings++;
5028         }
5029         /* round to next power of 2 */
5030         max = max_size;
5031         while ((max & num_rings) == 0)
5032                 max >>= 1;
5033
5034         if (num_rings != max)
5035                 max <<= 1;
5036
5037         return max;
5038 }
5039
/* Compute all rx buffer and ring geometry for the requested ring size
 * and the current MTU, and decide whether the jumbo page ring is used.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Full skb footprint: data + alignment slack + shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Frame doesn't fit in one page: use the page ring for the
		 * tail of each frame.  NOTE(review): the "- 40" presumably
		 * accounts for header bytes kept in the first buffer --
		 * confirm against the rx fast path.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In jumbo mode the first buffer only holds the headers. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5078
/* Free every skb still posted on the tx rings, unmapping its DMA
 * buffers.  Called with the hardware quiesced (after chip reset).
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;

			if (skb == NULL) {
				j++;
				continue;
			}

			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

			tx_buf->skb = NULL;

			/* Each skb occupies one BD per fragment plus one. */
			j += skb_shinfo(skb)->nr_frags + 1;
			dev_kfree_skb(skb);
		}
	}
}
5110
5111 static void
5112 bnx2_free_rx_skbs(struct bnx2 *bp)
5113 {
5114         int i;
5115
5116         for (i = 0; i < bp->num_rx_rings; i++) {
5117                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5118                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5119                 int j;
5120
5121                 if (rxr->rx_buf_ring == NULL)
5122                         return;
5123
5124                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5125                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5126                         struct sk_buff *skb = rx_buf->skb;
5127
5128                         if (skb == NULL)
5129                                 continue;
5130
5131                         pci_unmap_single(bp->pdev,
5132                                          pci_unmap_addr(rx_buf, mapping),
5133                                          bp->rx_buf_use_size,
5134                                          PCI_DMA_FROMDEVICE);
5135
5136                         rx_buf->skb = NULL;
5137
5138                         dev_kfree_skb(skb);
5139                 }
5140                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5141                         bnx2_free_rx_page(bp, rxr, j);
5142         }
5143 }
5144
/* Free all tx and rx buffers still owned by the driver.  Called after
 * the chip has been reset so the hardware no longer accesses them.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5151
/* Reset the chip and reinitialize it along with all rings.  Ring
 * buffers are freed right after the reset -- even when the reset
 * failed -- so they are never leaked.  Returns 0 or a negative errno
 * from the reset/init steps.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}
5168
/* Full NIC (re)initialization: reset the chip and rings, then bring up
 * the PHY and link under phy_lock.  reset_phy is passed through to
 * bnx2_init_phy().  Returns 0 or a negative errno from the reset.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5185
5186 static int
5187 bnx2_shutdown_chip(struct bnx2 *bp)
5188 {
5189         u32 reset_code;
5190
5191         if (bp->flags & BNX2_FLAG_NO_WOL)
5192                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5193         else if (bp->wol)
5194                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5195         else
5196                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5197
5198         return bnx2_reset_chip(bp, reset_code);
5199 }
5200
/* Ethtool register self-test.  For each register in reg_tbl, verify
 * that the read-write bits (rw_mask) accept both all-0 and all-1
 * writes and that the read-only bits (ro_mask) keep their original
 * value across those writes.  The saved value is restored in every
 * case.  Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, read-write mask, read-only mask }; terminated
	 * by offset 0xffff.  Entries flagged BNX2_FL_NOT_5709 are
	 * skipped on 5709 chips.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all 0s: rw bits must read 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all 1s: rw bits must read 1, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5371
5372 static int
5373 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5374 {
5375         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5376                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5377         int i;
5378
5379         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5380                 u32 offset;
5381
5382                 for (offset = 0; offset < size; offset += 4) {
5383
5384                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5385
5386                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5387                                 test_pattern[i]) {
5388                                 return -ENODEV;
5389                         }
5390                 }
5391         }
5392         return 0;
5393 }
5394
5395 static int
5396 bnx2_test_memory(struct bnx2 *bp)
5397 {
5398         int ret = 0;
5399         int i;
5400         static struct mem_entry {
5401                 u32   offset;
5402                 u32   len;
5403         } mem_tbl_5706[] = {
5404                 { 0x60000,  0x4000 },
5405                 { 0xa0000,  0x3000 },
5406                 { 0xe0000,  0x4000 },
5407                 { 0x120000, 0x4000 },
5408                 { 0x1a0000, 0x4000 },
5409                 { 0x160000, 0x4000 },
5410                 { 0xffffffff, 0    },
5411         },
5412         mem_tbl_5709[] = {
5413                 { 0x60000,  0x4000 },
5414                 { 0xa0000,  0x3000 },
5415                 { 0xe0000,  0x4000 },
5416                 { 0x120000, 0x4000 },
5417                 { 0x1a0000, 0x4000 },
5418                 { 0xffffffff, 0    },
5419         };
5420         struct mem_entry *mem_tbl;
5421
5422         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5423                 mem_tbl = mem_tbl_5709;
5424         else
5425                 mem_tbl = mem_tbl_5706;
5426
5427         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5428                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5429                         mem_tbl[i].len)) != 0) {
5430                         return ret;
5431                 }
5432         }
5433
5434         return ret;
5435 }
5436
5437 #define BNX2_MAC_LOOPBACK       0
5438 #define BNX2_PHY_LOOPBACK       1
5439
5440 static int
5441 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5442 {
5443         unsigned int pkt_size, num_pkts, i;
5444         struct sk_buff *skb, *rx_skb;
5445         unsigned char *packet;
5446         u16 rx_start_idx, rx_idx;
5447         dma_addr_t map;
5448         struct tx_bd *txbd;
5449         struct sw_bd *rx_buf;
5450         struct l2_fhdr *rx_hdr;
5451         int ret = -ENODEV;
5452         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5453         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5454         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5455
5456         tx_napi = bnapi;
5457
5458         txr = &tx_napi->tx_ring;
5459         rxr = &bnapi->rx_ring;
5460         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5461                 bp->loopback = MAC_LOOPBACK;
5462                 bnx2_set_mac_loopback(bp);
5463         }
5464         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5465                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5466                         return 0;
5467
5468                 bp->loopback = PHY_LOOPBACK;
5469                 bnx2_set_phy_loopback(bp);
5470         }
5471         else
5472                 return -EINVAL;
5473
5474         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5475         skb = netdev_alloc_skb(bp->dev, pkt_size);
5476         if (!skb)
5477                 return -ENOMEM;
5478         packet = skb_put(skb, pkt_size);
5479         memcpy(packet, bp->dev->dev_addr, 6);
5480         memset(packet + 6, 0x0, 8);
5481         for (i = 14; i < pkt_size; i++)
5482                 packet[i] = (unsigned char) (i & 0xff);
5483
5484         if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5485                 dev_kfree_skb(skb);
5486                 return -EIO;
5487         }
5488         map = skb_shinfo(skb)->dma_head;
5489
5490         REG_WR(bp, BNX2_HC_COMMAND,
5491                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5492
5493         REG_RD(bp, BNX2_HC_COMMAND);
5494
5495         udelay(5);
5496         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5497
5498         num_pkts = 0;
5499
5500         txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5501
5502         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5503         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5504         txbd->tx_bd_mss_nbytes = pkt_size;
5505         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5506
5507         num_pkts++;
5508         txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5509         txr->tx_prod_bseq += pkt_size;
5510
5511         REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5512         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5513
5514         udelay(100);
5515
5516         REG_WR(bp, BNX2_HC_COMMAND,
5517                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5518
5519         REG_RD(bp, BNX2_HC_COMMAND);
5520
5521         udelay(5);
5522
5523         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5524         dev_kfree_skb(skb);
5525
5526         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5527                 goto loopback_test_done;
5528
5529         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5530         if (rx_idx != rx_start_idx + num_pkts) {
5531                 goto loopback_test_done;
5532         }
5533
5534         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5535         rx_skb = rx_buf->skb;
5536
5537         rx_hdr = (struct l2_fhdr *) rx_skb->data;
5538         skb_reserve(rx_skb, BNX2_RX_OFFSET);
5539
5540         pci_dma_sync_single_for_cpu(bp->pdev,
5541                 pci_unmap_addr(rx_buf, mapping),
5542                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5543
5544         if (rx_hdr->l2_fhdr_status &
5545                 (L2_FHDR_ERRORS_BAD_CRC |
5546                 L2_FHDR_ERRORS_PHY_DECODE |
5547                 L2_FHDR_ERRORS_ALIGNMENT |
5548                 L2_FHDR_ERRORS_TOO_SHORT |
5549                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5550
5551                 goto loopback_test_done;
5552         }
5553
5554         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5555                 goto loopback_test_done;
5556         }
5557
5558         for (i = 14; i < pkt_size; i++) {
5559                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5560                         goto loopback_test_done;
5561                 }
5562         }
5563
5564         ret = 0;
5565
5566 loopback_test_done:
5567         bp->loopback = 0;
5568         return ret;
5569 }
5570
5571 #define BNX2_MAC_LOOPBACK_FAILED        1
5572 #define BNX2_PHY_LOOPBACK_FAILED        2
5573 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5574                                          BNX2_PHY_LOOPBACK_FAILED)
5575
5576 static int
5577 bnx2_test_loopback(struct bnx2 *bp)
5578 {
5579         int rc = 0;
5580
5581         if (!netif_running(bp->dev))
5582                 return BNX2_LOOPBACK_FAILED;
5583
5584         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5585         spin_lock_bh(&bp->phy_lock);
5586         bnx2_init_phy(bp, 1);
5587         spin_unlock_bh(&bp->phy_lock);
5588         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5589                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5590         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5591                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5592         return rc;
5593 }
5594
5595 #define NVRAM_SIZE 0x200
5596 #define CRC32_RESIDUAL 0xdebb20e3
5597
5598 static int
5599 bnx2_test_nvram(struct bnx2 *bp)
5600 {
5601         __be32 buf[NVRAM_SIZE / 4];
5602         u8 *data = (u8 *) buf;
5603         int rc = 0;
5604         u32 magic, csum;
5605
5606         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5607                 goto test_nvram_done;
5608
5609         magic = be32_to_cpu(buf[0]);
5610         if (magic != 0x669955aa) {
5611                 rc = -ENODEV;
5612                 goto test_nvram_done;
5613         }
5614
5615         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5616                 goto test_nvram_done;
5617
5618         csum = ether_crc_le(0x100, data);
5619         if (csum != CRC32_RESIDUAL) {
5620                 rc = -ENODEV;
5621                 goto test_nvram_done;
5622         }
5623
5624         csum = ether_crc_le(0x100, data + 0x100);
5625         if (csum != CRC32_RESIDUAL) {
5626                 rc = -ENODEV;
5627         }
5628
5629 test_nvram_done:
5630         return rc;
5631 }
5632
/* Ethtool link self-test: 0 when link is up, -ENODEV otherwise.  With
 * a remote PHY the cached bp->link_up is used; for a local PHY the
 * BMSR is read twice -- NOTE(review): presumably because link status
 * is latched, so the second read reflects the current state.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5658
5659 static int
5660 bnx2_test_intr(struct bnx2 *bp)
5661 {
5662         int i;
5663         u16 status_idx;
5664
5665         if (!netif_running(bp->dev))
5666                 return -ENODEV;
5667
5668         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5669
5670         /* This register is not touched during run-time. */
5671         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5672         REG_RD(bp, BNX2_HC_COMMAND);
5673
5674         for (i = 0; i < 10; i++) {
5675                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5676                         status_idx) {
5677
5678                         break;
5679                 }
5680
5681                 msleep_interruptible(10);
5682         }
5683         if (i < 10)
5684                 return 0;
5685
5686         return -ENODEV;
5687 }
5688
/* Determining link for parallel detection.  Returns 1 when the 5706
 * SerDes sees a partner that is up but not autonegotiating (signal
 * detected, sync acquired, and no CONFIG codewords being received),
 * 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Signal detect is a prerequisite for everything below. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice -- NOTE(review): presumably latched status bits;
	 * the second read returns the current state.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5720
/* Periodic link maintenance for the 5706 SerDes PHY, run from the
 * driver timer with bp->phy_lock taken here.  Implements parallel
 * detection (force 1G full duplex when the partner does not
 * autonegotiate, revert to autoneg when it does) and forces the link
 * down when sync is lost.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently (re)started; give it time to finish. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				/* Partner is up but not autonegotiating:
				 * force 1G full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): shadow register 0x17/0x15 access --
		 * presumably detects that the partner has now started
		 * autonegotiation; confirm against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner autonegotiates again: re-enable autoneg. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Read twice -- NOTE(review): presumably latched bits;
		 * second read reflects the current state.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5782
/* Periodic link maintenance for the 5708 SerDes PHY: when the part is
 * 2.5G capable and the link is down with autoneg requested, alternate
 * between forced 2.5G and autonegotiation until a partner is found.
 * No-op for remote PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Autoneg recently restarted; give it time to finish. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found nothing: try forced 2.5G and
			 * re-check on the forced-mode timeout interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G found nothing: back to autoneg and
			 * skip the next two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
5815
5816 static void
5817 bnx2_timer(unsigned long data)
5818 {
5819         struct bnx2 *bp = (struct bnx2 *) data;
5820
5821         if (!netif_running(bp->dev))
5822                 return;
5823
5824         if (atomic_read(&bp->intr_sem) != 0)
5825                 goto bnx2_restart_timer;
5826
5827         if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5828              BNX2_FLAG_USING_MSI)
5829                 bnx2_chk_missed_msi(bp);
5830
5831         bnx2_send_heart_beat(bp);
5832
5833         bp->stats_blk->stat_FwRxDrop =
5834                 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5835
5836         /* workaround occasional corrupted counters */
5837         if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5838                 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5839                                             BNX2_HC_COMMAND_STATS_NOW);
5840
5841         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5842                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5843                         bnx2_5706_serdes_timer(bp);
5844                 else
5845                         bnx2_5708_serdes_timer(bp);
5846         }
5847
5848 bnx2_restart_timer:
5849         mod_timer(&bp->timer, jiffies + bp->current_interval);
5850 }
5851
5852 static int
5853 bnx2_request_irq(struct bnx2 *bp)
5854 {
5855         unsigned long flags;
5856         struct bnx2_irq *irq;
5857         int rc = 0, i;
5858
5859         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5860                 flags = 0;
5861         else
5862                 flags = IRQF_SHARED;
5863
5864         for (i = 0; i < bp->irq_nvecs; i++) {
5865                 irq = &bp->irq_tbl[i];
5866                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5867                                  &bp->bnx2_napi[i]);
5868                 if (rc)
5869                         break;
5870                 irq->requested = 1;
5871         }
5872         return rc;
5873 }
5874
5875 static void
5876 bnx2_free_irq(struct bnx2 *bp)
5877 {
5878         struct bnx2_irq *irq;
5879         int i;
5880
5881         for (i = 0; i < bp->irq_nvecs; i++) {
5882                 irq = &bp->irq_tbl[i];
5883                 if (irq->requested)
5884                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5885                 irq->requested = 0;
5886         }
5887         if (bp->flags & BNX2_FLAG_USING_MSI)
5888                 pci_disable_msi(bp->pdev);
5889         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5890                 pci_disable_msix(bp->pdev);
5891
5892         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5893 }
5894
/* Try to switch the device to MSI-X with msix_vecs vectors.
 * Programs the chip's MSI-X table/PBA windows, then negotiates vectors
 * with the PCI core.  On any failure this returns silently and the
 * caller stays in the default single-vector INTx/MSI setup.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Map the MSI-X table and PBA into GRC windows before enabling. */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* All-or-nothing: a non-zero return (including a positive count
	 * of available vectors) leaves the device in non-MSI-X mode.
	 */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	/* Record every hardware vector even though only irq_nvecs of
	 * them will be requested; names are "<ifname>-<index>".
	 */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
5925
5926 static void
5927 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5928 {
5929         int cpus = num_online_cpus();
5930         int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5931
5932         bp->irq_tbl[0].handler = bnx2_interrupt;
5933         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5934         bp->irq_nvecs = 1;
5935         bp->irq_tbl[0].vector = bp->pdev->irq;
5936
5937         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5938                 bnx2_enable_msix(bp, msix_vecs);
5939
5940         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5941             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5942                 if (pci_enable_msi(bp->pdev) == 0) {
5943                         bp->flags |= BNX2_FLAG_USING_MSI;
5944                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5945                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5946                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5947                         } else
5948                                 bp->irq_tbl[0].handler = bnx2_msi;
5949
5950                         bp->irq_tbl[0].vector = bp->pdev->irq;
5951                 }
5952         }
5953
5954         bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5955         bp->dev->real_num_tx_queues = bp->num_tx_rings;
5956
5957         bp->num_rx_rings = bp->irq_nvecs;
5958 }
5959
/* Called with rtnl_lock */
/* ndo_open: bring the interface up.
 * Order matters throughout: interrupt mode must be chosen before memory
 * allocation (ring counts depend on vector count), IRQs must be
 * requested before the NIC is initialized, and the timer must be armed
 * before interrupts are enabled.  If MSI is in use, it is verified with
 * a test interrupt and the whole setup is redone in INTx mode on
 * failure.  Returns 0 or a negative errno; the open_err path unwinds
 * everything acquired so far.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Picks MSI-X/MSI/INTx and sets irq_nvecs / ring counts used by
	 * bnx2_alloc_mem() below.
	 */
	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces plain INTx this time around. */
			bnx2_setup_int_mode(bp, 1);

			/* reset_phy=0: the PHY was already reset above. */
			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* Timer was armed above; stop it before
				 * unwinding.
				 */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Safe to call in any partially-initialized state. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6036
6037 static void
6038 bnx2_reset_task(struct work_struct *work)
6039 {
6040         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6041
6042         if (!netif_running(bp->dev))
6043                 return;
6044
6045         bnx2_netif_stop(bp);
6046
6047         bnx2_init_nic(bp, 1);
6048
6049         atomic_set(&bp->intr_sem,&