4fc9d1653cda0d66a5cf78fe3bf594462d84172a
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Scratch buffer size (64KB) for firmware handling; the zlib-compressed
 * firmware images come from bnx2_fw.h / bnx2_fw2.h.  Usage is elsewhere
 * in the file. */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* printk message prefix */
#define DRV_MODULE_VERSION      "1.7.0"
#define DRV_MODULE_RELDATE      "December 11, 2007"

/* Convert a relative timeout in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
66
/* One-line version banner, typically printed once at probe time. */
static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Supported board types; used as the driver_data index into
 * board_info[] below, so the two must stay in the same order. */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* indexed by board_t, above */
static const struct {
        char *name;     /* marketing name printed for the adapter */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI device ID table.  HP OEM boards are matched first by subsystem
 * vendor/device ID; generic Broadcom entries with PCI_ANY_ID catch the
 * rest.  The final field is the board_t driver_data index. */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* Table of known NVRAM/flash parts.  Each struct flash_spec entry
 * carries five raw configuration words (strap value, config1-3, write1
 * command word), the access flags, page geometry, byte address mask,
 * total size and a human-readable name.  "Expansion" entries are
 * placeholders for strap values with no part assigned yet (note their
 * total_size of 0). */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 family has a single fixed buffered-flash layout, so it gets
 * a dedicated spec instead of a flash_table[] strap lookup.  The raw
 * strap/config words are intentionally left unset here. */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bnapi->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Read a device register indirectly through the PCI config-space
 * register window.  The address/data window pair is a shared resource,
 * hence the lock.  Returns the 32-bit register value.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
258
/* Write a device register indirectly through the PCI config-space
 * register window; counterpart of bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register 'reg' via the EMAC MDIO interface.
 *
 * If the chip is auto-polling the PHY, auto-poll is disabled around
 * the manual transaction and re-enabled afterwards so the two do not
 * contend for the MDIO bus.
 *
 * Returns 0 with the 16-bit value in *val, or -EBUSY (and *val = 0)
 * if the transaction never completes.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Temporarily turn off hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Kick off the read: PHY address, register, READ command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the data field. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
349
/* Write 'val' (16-bit) to PHY register 'reg' via the EMAC MDIO
 * interface.  Mirrors bnx2_read_phy(): auto-polling is suspended
 * around the manual transaction if it is active.
 *
 * Returns 0 on success or -EBUSY if the transaction never completes.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Temporarily turn off hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Kick off the write: PHY address, register, data, WRITE command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
398
399 static void
400 bnx2_disable_int(struct bnx2 *bp)
401 {
402         int i;
403         struct bnx2_napi *bnapi;
404
405         for (i = 0; i < bp->irq_nvecs; i++) {
406                 bnapi = &bp->bnx2_napi[i];
407                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
409         }
410         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
411 }
412
/* Re-enable interrupts on every vector.  For each vector the current
 * status index is acknowledged twice: first with interrupts still
 * masked, then unmasked.  Finally COAL_NOW asks the host coalescing
 * block to generate an interrupt immediately so no event that arrived
 * while masked is lost.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                /* Ack up to last_status_idx with interrupts masked. */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                /* Same ack with the mask bit dropped: unmask. */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
433
434 static void
435 bnx2_disable_int_sync(struct bnx2 *bp)
436 {
437         int i;
438
439         atomic_inc(&bp->intr_sem);
440         bnx2_disable_int(bp);
441         for (i = 0; i < bp->irq_nvecs; i++)
442                 synchronize_irq(bp->irq_tbl[i].vector);
443 }
444
445 static void
446 bnx2_napi_disable(struct bnx2 *bp)
447 {
448         int i;
449
450         for (i = 0; i < bp->irq_nvecs; i++)
451                 napi_disable(&bp->bnx2_napi[i].napi);
452 }
453
454 static void
455 bnx2_napi_enable(struct bnx2 *bp)
456 {
457         int i;
458
459         for (i = 0; i < bp->irq_nvecs; i++)
460                 napi_enable(&bp->bnx2_napi[i].napi);
461 }
462
/* Quiesce the data path: mask interrupts (waiting for running
 * handlers), then stop NAPI polling and the tx queue.  Paired with
 * bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
473
474 static void
475 bnx2_netif_start(struct bnx2 *bp)
476 {
477         if (atomic_dec_and_test(&bp->intr_sem)) {
478                 if (netif_running(bp->dev)) {
479                         netif_wake_queue(bp->dev);
480                         bnx2_napi_enable(bp);
481                         bnx2_enable_int(bp);
482                 }
483         }
484 }
485
486 static void
487 bnx2_free_mem(struct bnx2 *bp)
488 {
489         int i;
490
491         for (i = 0; i < bp->ctx_pages; i++) {
492                 if (bp->ctx_blk[i]) {
493                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
494                                             bp->ctx_blk[i],
495                                             bp->ctx_blk_mapping[i]);
496                         bp->ctx_blk[i] = NULL;
497                 }
498         }
499         if (bp->status_blk) {
500                 pci_free_consistent(bp->pdev, bp->status_stats_size,
501                                     bp->status_blk, bp->status_blk_mapping);
502                 bp->status_blk = NULL;
503                 bp->stats_blk = NULL;
504         }
505         if (bp->tx_desc_ring) {
506                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507                                     bp->tx_desc_ring, bp->tx_desc_mapping);
508                 bp->tx_desc_ring = NULL;
509         }
510         kfree(bp->tx_buf_ring);
511         bp->tx_buf_ring = NULL;
512         for (i = 0; i < bp->rx_max_ring; i++) {
513                 if (bp->rx_desc_ring[i])
514                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
515                                             bp->rx_desc_ring[i],
516                                             bp->rx_desc_mapping[i]);
517                 bp->rx_desc_ring[i] = NULL;
518         }
519         vfree(bp->rx_buf_ring);
520         bp->rx_buf_ring = NULL;
521         for (i = 0; i < bp->rx_max_pg_ring; i++) {
522                 if (bp->rx_pg_desc_ring[i])
523                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524                                             bp->rx_pg_desc_ring[i],
525                                             bp->rx_pg_desc_mapping[i]);
526                 bp->rx_pg_desc_ring[i] = NULL;
527         }
528         if (bp->rx_pg_ring)
529                 vfree(bp->rx_pg_ring);
530         bp->rx_pg_ring = NULL;
531 }
532
/* Allocate all host and DMA memory for the device: tx/rx software and
 * descriptor rings, optional rx page ring, the combined status +
 * statistics block (with per-vector sub-blocks under MSI-X), and on
 * the 5709 the context memory pages.
 *
 * Returns 0 on success, -ENOMEM on any failure; on failure everything
 * already allocated is released via bnx2_free_mem() (goto cleanup).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* Software rx ring can be large, so vmalloc instead of kmalloc. */
        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* The rx page ring is only used when rx_pg_ring_size is set. */
        if (bp->rx_pg_ring_size) {
                bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                         bp->rx_max_pg_ring);
                if (bp->rx_pg_ring == NULL)
                        goto alloc_mem_err;

                memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                       bp->rx_max_pg_ring);
        }

        for (i = 0; i < bp->rx_max_pg_ring; i++) {
                bp->rx_pg_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_pg_desc_mapping[i]);
                if (bp->rx_pg_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & MSIX_CAP_FLAG)
                /* One aligned status sub-block per hardware MSI-X vector. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base of the status block; under MSI-X the
         * remaining vectors point into their aligned sub-blocks. */
        bp->bnx2_napi[0].status_blk = bp->status_blk;
        if (bp->flags & MSIX_CAP_FLAG) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

                        bnapi->status_blk = (void *)
                                ((unsigned long) bp->status_blk +
                                 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block sits right after the status block(s). */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 0x2000 bytes of context memory, in BCM_PAGE_SIZE pages. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
631
/* Report the current link state (speed/duplex/autoneg result) to the
 * bootcode via the BNX2_LINK_STATUS word in shared memory.  Skipped
 * when the link is managed by a remote PHY.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode speed + duplex into the firmware status code. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is latched; read twice for current state. */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
690
691 static char *
692 bnx2_xceiver_str(struct bnx2 *bp)
693 {
694         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
696                  "Copper"));
697 }
698
/* Log the link state to the kernel log, update the carrier state, and
 * forward the result to the bootcode via bnx2_report_fw_link().  The
 * bare printk() calls continue the line started with KERN_INFO.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
735
/* Determine the tx/rx pause configuration (bp->flow_ctrl) after a link
 * change.
 *
 * If speed or flow control is not autonegotiated, the requested
 * setting is used directly (full duplex only).  On a 5708 SerDes link
 * the resolved state is read straight from the PHY status register.
 * Otherwise the result is resolved from the local and link-partner
 * advertisements per IEEE 802.3 Table 28B-3, translating 1000Base-X
 * pause bits to the MII encoding first when on SerDes.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                /* Not fully autonegotiated: honor the requested setting. */
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                /* The 5708S PHY reports the resolved pause state directly. */
                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Map 1000Base-X pause bits onto the MII pause encoding
                 * so the resolution logic below applies to both. */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
811
812 static int
813 bnx2_5709s_linkup(struct bnx2 *bp)
814 {
815         u32 val, speed;
816
817         bp->link_up = 1;
818
819         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
820         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
821         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
822
823         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
824                 bp->line_speed = bp->req_line_speed;
825                 bp->duplex = bp->req_duplex;
826                 return 0;
827         }
828         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
829         switch (speed) {
830                 case MII_BNX2_GP_TOP_AN_SPEED_10:
831                         bp->line_speed = SPEED_10;
832                         break;
833                 case MII_BNX2_GP_TOP_AN_SPEED_100:
834                         bp->line_speed = SPEED_100;
835                         break;
836                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
837                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
838                         bp->line_speed = SPEED_1000;
839                         break;
840                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
841                         bp->line_speed = SPEED_2500;
842                         break;
843         }
844         if (val & MII_BNX2_GP_TOP_AN_FD)
845                 bp->duplex = DUPLEX_FULL;
846         else
847                 bp->duplex = DUPLEX_HALF;
848         return 0;
849 }
850
851 static int
852 bnx2_5708s_linkup(struct bnx2 *bp)
853 {
854         u32 val;
855
856         bp->link_up = 1;
857         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859                 case BCM5708S_1000X_STAT1_SPEED_10:
860                         bp->line_speed = SPEED_10;
861                         break;
862                 case BCM5708S_1000X_STAT1_SPEED_100:
863                         bp->line_speed = SPEED_100;
864                         break;
865                 case BCM5708S_1000X_STAT1_SPEED_1G:
866                         bp->line_speed = SPEED_1000;
867                         break;
868                 case BCM5708S_1000X_STAT1_SPEED_2G5:
869                         bp->line_speed = SPEED_2500;
870                         break;
871         }
872         if (val & BCM5708S_1000X_STAT1_FD)
873                 bp->duplex = DUPLEX_FULL;
874         else
875                 bp->duplex = DUPLEX_HALF;
876
877         return 0;
878 }
879
880 static int
881 bnx2_5706s_linkup(struct bnx2 *bp)
882 {
883         u32 bmcr, local_adv, remote_adv, common;
884
885         bp->link_up = 1;
886         bp->line_speed = SPEED_1000;
887
888         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889         if (bmcr & BMCR_FULLDPLX) {
890                 bp->duplex = DUPLEX_FULL;
891         }
892         else {
893                 bp->duplex = DUPLEX_HALF;
894         }
895
896         if (!(bmcr & BMCR_ANENABLE)) {
897                 return 0;
898         }
899
900         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
902
903         common = local_adv & remote_adv;
904         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
905
906                 if (common & ADVERTISE_1000XFULL) {
907                         bp->duplex = DUPLEX_FULL;
908                 }
909                 else {
910                         bp->duplex = DUPLEX_HALF;
911                 }
912         }
913
914         return 0;
915 }
916
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	/* Resolve speed/duplex of an established link on a copper PHY.
	 * With autoneg enabled, use the intersection of the local and
	 * link-partner advertisements; otherwise report the forced
	 * BMCR settings directly.
	 */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Check 1000BASE-T first.  The partner's 1000BASE-T
		 * abilities (MII_STAT1000) sit 2 bits left of the
		 * corresponding local advertisement bits (MII_CTRL1000),
		 * hence the >> 2 before intersecting.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to 10/100 in
			 * highest-common-denominator order
			 * (100F, 100H, 10F, 10H).
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Advertisements do not overlap at all:
				 * treat the link as down.
				 */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: report forced BMCR speed/duplex. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
982
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Program the MAC (EMAC) to match the resolved PHY link state:
	 * slot time, port mode, duplex and pause settings.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* 1000 Mbps half duplex needs an extended slot time (0x26ff). */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII port mode; the 5706
				 * falls through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G sets the 25G-mode bit and falls
				 * through to select GMII as well.
				 */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1049
1050 static void
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1052 {
1053         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1054             (CHIP_NUM(bp) == CHIP_NUM_5709))
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_GP_STATUS);
1057 }
1058
1059 static void
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1061 {
1062         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1063             (CHIP_NUM(bp) == CHIP_NUM_5709))
1064                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1066 }
1067
1068 static int
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1070 {
1071         u32 up1;
1072         int ret = 1;
1073
1074         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1075                 return 0;
1076
1077         if (bp->autoneg & AUTONEG_SPEED)
1078                 bp->advertising |= ADVERTISED_2500baseX_Full;
1079
1080         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1082
1083         bnx2_read_phy(bp, bp->mii_up1, &up1);
1084         if (!(up1 & BCM5708S_UP1_2G5)) {
1085                 up1 |= BCM5708S_UP1_2G5;
1086                 bnx2_write_phy(bp, bp->mii_up1, up1);
1087                 ret = 0;
1088         }
1089
1090         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094         return ret;
1095 }
1096
1097 static int
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1099 {
1100         u32 up1;
1101         int ret = 0;
1102
1103         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1104                 return 0;
1105
1106         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1108
1109         bnx2_read_phy(bp, bp->mii_up1, &up1);
1110         if (up1 & BCM5708S_UP1_2G5) {
1111                 up1 &= ~BCM5708S_UP1_2G5;
1112                 bnx2_write_phy(bp, bp->mii_up1, up1);
1113                 ret = 1;
1114         }
1115
1116         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1119
1120         return ret;
1121 }
1122
1123 static void
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1125 {
1126         u32 bmcr;
1127
1128         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1129                 return;
1130
1131         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1132                 u32 val;
1133
1134                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1136                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1148         }
1149
1150         if (bp->autoneg & AUTONEG_SPEED) {
1151                 bmcr &= ~BMCR_ANENABLE;
1152                 if (bp->req_duplex == DUPLEX_FULL)
1153                         bmcr |= BMCR_FULLDPLX;
1154         }
1155         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1156 }
1157
1158 static void
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1160 {
1161         u32 bmcr;
1162
1163         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1164                 return;
1165
1166         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1167                 u32 val;
1168
1169                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1171                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1174
1175                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1178
1179         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1182         }
1183
1184         if (bp->autoneg & AUTONEG_SPEED)
1185                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1187 }
1188
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* Re-evaluate the link state and reprogram the MAC to match.
	 * NOTE(review): appears to expect phy_lock held by the caller
	 * (consistent with the spin_unlock_bh/spin_lock_bh dance in
	 * bnx2_setup_remote_phy) — confirm against callers.
	 */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		/* In loopback modes the link is up by definition. */
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY configurations are driven by firmware link events
	 * (bnx2_remote_phy_event) instead of direct MDIO polling.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down events; read it twice to obtain the
	 * current state.  On 5709 SerDes the register lives in another
	 * block, hence the enable/disable wrappers around the reads.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* 5706 SerDes: override the BMSR link bit with the
		 * EMAC's own link status.
		 */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: drop any forced 2.5G setting so the next
		 * autonegotiation starts from a clean state.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Log the transition only when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1254
1255 static int
1256 bnx2_reset_phy(struct bnx2 *bp)
1257 {
1258         int i;
1259         u32 reg;
1260
1261         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1262
1263 #define PHY_RESET_MAX_WAIT 100
1264         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1265                 udelay(10);
1266
1267                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1268                 if (!(reg & BMCR_RESET)) {
1269                         udelay(20);
1270                         break;
1271                 }
1272         }
1273         if (i == PHY_RESET_MAX_WAIT) {
1274                 return -EBUSY;
1275         }
1276         return 0;
1277 }
1278
1279 static u32
1280 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1281 {
1282         u32 adv = 0;
1283
1284         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1285                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1286
1287                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1288                         adv = ADVERTISE_1000XPAUSE;
1289                 }
1290                 else {
1291                         adv = ADVERTISE_PAUSE_CAP;
1292                 }
1293         }
1294         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1295                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1296                         adv = ADVERTISE_1000XPSE_ASYM;
1297                 }
1298                 else {
1299                         adv = ADVERTISE_PAUSE_ASYM;
1300                 }
1301         }
1302         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1303                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1304                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1305                 }
1306                 else {
1307                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1308                 }
1309         }
1310         return adv;
1311 }
1312
1313 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1314
1315 static int
1316 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1317 {
1318         u32 speed_arg = 0, pause_adv;
1319
1320         pause_adv = bnx2_phy_get_pause_adv(bp);
1321
1322         if (bp->autoneg & AUTONEG_SPEED) {
1323                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1324                 if (bp->advertising & ADVERTISED_10baseT_Half)
1325                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1326                 if (bp->advertising & ADVERTISED_10baseT_Full)
1327                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1328                 if (bp->advertising & ADVERTISED_100baseT_Half)
1329                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1330                 if (bp->advertising & ADVERTISED_100baseT_Full)
1331                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1332                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1333                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1334                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1335                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1336         } else {
1337                 if (bp->req_line_speed == SPEED_2500)
1338                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1339                 else if (bp->req_line_speed == SPEED_1000)
1340                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1341                 else if (bp->req_line_speed == SPEED_100) {
1342                         if (bp->req_duplex == DUPLEX_FULL)
1343                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1344                         else
1345                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1346                 } else if (bp->req_line_speed == SPEED_10) {
1347                         if (bp->req_duplex == DUPLEX_FULL)
1348                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1349                         else
1350                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1351                 }
1352         }
1353
1354         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1355                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1356         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1357                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1358
1359         if (port == PORT_TP)
1360                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1361                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1362
1363         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1364
1365         spin_unlock_bh(&bp->phy_lock);
1366         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1367         spin_lock_bh(&bp->phy_lock);
1368
1369         return 0;
1370 }
1371
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Apply the requested link settings to a SerDes PHY.  Two major
	 * paths: forced speed/duplex (autoneg off) and autonegotiation.
	 * Called with phy_lock held.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggle the PHY's 2.5G enable to match the requested
		 * speed; if it changed, the link must be bounced so the
		 * partner sees the new configuration.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific force-speed programming. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is the BMCR_SPEED100 bit; with
				 * BMCR_SPEED1000 set above, clearing it
				 * selects the 1000 Mb/s speed encoding.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve pause and
			 * reprogram the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop phy_lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1486
1487 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1488         (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
1489                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1490                 (ADVERTISED_1000baseT_Full)
1491
1492 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1493         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1494         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1495         ADVERTISED_1000baseT_Full)
1496
1497 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1498         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1499
1500 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1501
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	/* Initialize the driver's requested link settings from the
	 * firmware-provided defaults in shared memory (remote-PHY
	 * configurations).  The default word depends on the port type.
	 */
	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg default: translate each firmware speed bit
		 * into the corresponding ethtool advertisement bit.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced default: later checks override earlier ones,
		 * so the highest speed bit present wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1548
1549 static void
1550 bnx2_set_default_link(struct bnx2 *bp)
1551 {
1552         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1553                 return bnx2_set_default_remote_link(bp);
1554
1555         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1556         bp->req_line_speed = 0;
1557         if (bp->phy_flags & PHY_SERDES_FLAG) {
1558                 u32 reg;
1559
1560                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1561
1562                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1563                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1564                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1565                         bp->autoneg = 0;
1566                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1567                         bp->req_duplex = DUPLEX_FULL;
1568                 }
1569         } else
1570                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1571 }
1572
1573 static void
1574 bnx2_send_heart_beat(struct bnx2 *bp)
1575 {
1576         u32 msg;
1577         u32 addr;
1578
1579         spin_lock(&bp->indirect_lock);
1580         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1581         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1582         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1583         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1584         spin_unlock(&bp->indirect_lock);
1585 }
1586
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	/* Handle a firmware link event for remote-PHY configurations:
	 * decode the link status word from shared memory and update
	 * driver state plus the MAC.
	 */
	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware expects a heartbeat acknowledgment when it flags
	 * an expired pulse.
	 */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets half duplex then deliberately
		 * falls through to the matching FULL case, which sets
		 * the line speed shared by both.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Pause resolution: if flow control was not fully
		 * autonegotiated, honor the requested setting on
		 * full-duplex links; otherwise take the negotiated
		 * TX/RX pause bits from the firmware status word.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware can switch the active media; reload the
		 * defaults if the port type changed.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1665
1666 static int
1667 bnx2_set_remote_link(struct bnx2 *bp)
1668 {
1669         u32 evt_code;
1670
1671         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1672         switch (evt_code) {
1673                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1674                         bnx2_remote_phy_event(bp);
1675                         break;
1676                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1677                 default:
1678                         bnx2_send_heart_beat(bp);
1679                         break;
1680         }
1681         return 0;
1682 }
1683
/* Program the copper PHY according to bp->autoneg / bp->req_* settings.
 * Called with bp->phy_lock held; the lock is dropped briefly around the
 * msleep() used to force the link down when changing a forced speed.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits of the current
		 * advertisement for the change comparison below. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite the advertisement and restart autoneg only when
		 * something actually changed or autoneg was disabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1780
1781 static int
1782 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1783 {
1784         if (bp->loopback == MAC_LOOPBACK)
1785                 return 0;
1786
1787         if (bp->phy_flags & PHY_SERDES_FLAG) {
1788                 return (bnx2_setup_serdes_phy(bp, port));
1789         }
1790         else {
1791                 return (bnx2_setup_copper_phy(bp));
1792         }
1793 }
1794
/* One-time init for the BCM5709 SerDes PHY.  The 5709 exposes its IEEE
 * registers at an offset of 0x10 within the register blocks, so the
 * generic bp->mii_* offsets are redirected first; then the SerDes
 * digital, over-1G, BAM next-page and CL73 blocks are configured.
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block, then reset. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable the 2.5G bit only if the board is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1843
/* One-time init for the BCM5708 SerDes PHY: selects IEEE signal detect,
 * fiber mode with auto-detect, PLL early-link detect, optionally enables
 * 2.5G, and applies TX-amplitude tweaks for early chip revisions and
 * backplane designs read from shared configuration.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Shared memory may carry a board-specific TX control value;
	 * apply it only on backplane designs. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1901
/* One-time init for the BCM5706 SerDes PHY.  Tunes shadow registers
 * 0x18/0x1c differently for jumbo (MTU > 1500) vs standard frames.
 * The 0x18/0x1c values are magic, vendor-supplied sequences.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit (0x4000). */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1938
/* One-time init for an external copper PHY: applies the CRC-fix and
 * early-DAC-disable workarounds when flagged, sets/clears the extended
 * packet length bit depending on MTU, and enables the
 * ethernet@wirespeed feature.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* Vendor-supplied magic write sequence for the CRC workaround. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1989
1990
/* Top-level PHY initialization.  Selects link-ready interrupt mode,
 * resets the bp->mii_* offsets to the standard MII registers
 * (chip-specific init may redirect them), reads the PHY id, dispatches
 * to the chip-specific SerDes/copper init, and finally configures the
 * link via bnx2_setup_phy().  When the PHY is managed by remote
 * firmware (REMOTE_PHY_CAP_FLAG), local init is skipped entirely.
 * Returns 0 or the first error from the init/setup step.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* phy_id = (PHYSID1 << 16) | PHYSID2 */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2034
2035 static int
2036 bnx2_set_mac_loopback(struct bnx2 *bp)
2037 {
2038         u32 mac_mode;
2039
2040         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2041         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2042         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2043         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2044         bp->link_up = 1;
2045         return 0;
2046 }
2047
2048 static int bnx2_test_link(struct bnx2 *);
2049
2050 static int
2051 bnx2_set_phy_loopback(struct bnx2 *bp)
2052 {
2053         u32 mac_mode;
2054         int rc, i;
2055
2056         spin_lock_bh(&bp->phy_lock);
2057         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2058                             BMCR_SPEED1000);
2059         spin_unlock_bh(&bp->phy_lock);
2060         if (rc)
2061                 return rc;
2062
2063         for (i = 0; i < 10; i++) {
2064                 if (bnx2_test_link(bp) == 0)
2065                         break;
2066                 msleep(100);
2067         }
2068
2069         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2070         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2071                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2072                       BNX2_EMAC_MODE_25G_MODE);
2073
2074         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2075         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2076         bp->link_up = 1;
2077         return 0;
2078 }
2079
/* Post @msg_data (tagged with a fresh sequence number) to the firmware
 * driver mailbox and poll up to FW_ACK_TIME_OUT_MS for the matching
 * acknowledgement.  WAIT0-class messages return 0 without requiring a
 * successful ack.  On ack timeout the firmware is notified with a
 * FW_TIMEOUT code and -EBUSY is returned (the printk is suppressed when
 * @silent is set).  A non-OK ack status yields -EIO.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 requests are best-effort: succeed even without an ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2122
/* Initialize 5709 context memory: kick the hardware MEM_INIT, wait for
 * it to finish, then program the host page table with the DMA address
 * of every context block page.  Returns 0 on success, -EBUSY if
 * MEM_INIT or a page-table write does not complete in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode host page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for MEM_INIT completion (up to 10 x 2 us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Write the 64-bit DMA address of context page i and
		 * request a page-table entry update. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for the write request to be consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2165
/* Zero the on-chip context memory for all 96 connection IDs (pre-5709
 * context model).  On 5706 A0, some virtual CIDs are remapped to
 * different physical CIDs (presumably a chip erratum -- confirm against
 * the 5706 errata sheet).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* Remap CIDs with bit 3 set into the 0x60+ range. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* NOTE(review): the addresses are advanced by a cumulative
		 * (i << PHY_CTX_SHIFT), which only walks consecutive pages
		 * while CTX_SIZE / PHY_CTX_SIZE <= 2 -- confirm. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2208
/* Work around defective internal RX buffer memory.  Allocates every
 * free mbuf cluster from the chip's RX buffer pool; clusters whose
 * address has bit 9 set are bad and are deliberately never freed back,
 * while the good ones are returned to the pool.
 * Returns 0 on success, -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): assumes the pool holds at most 512 good clusters;
	 * the array index is not bounds-checked -- confirm pool size. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the cluster value into the free command word. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2259
2260 static void
2261 bnx2_set_mac_addr(struct bnx2 *bp)
2262 {
2263         u32 val;
2264         u8 *mac_addr = bp->dev->dev_addr;
2265
2266         val = (mac_addr[0] << 8) | mac_addr[1];
2267
2268         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2269
2270         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2271                 (mac_addr[4] << 8) | mac_addr[5];
2272
2273         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2274 }
2275
2276 static inline int
2277 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2278 {
2279         dma_addr_t mapping;
2280         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2281         struct rx_bd *rxbd =
2282                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2283         struct page *page = alloc_page(GFP_ATOMIC);
2284
2285         if (!page)
2286                 return -ENOMEM;
2287         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2288                                PCI_DMA_FROMDEVICE);
2289         rx_pg->page = page;
2290         pci_unmap_addr_set(rx_pg, mapping, mapping);
2291         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2292         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2293         return 0;
2294 }
2295
2296 static void
2297 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2298 {
2299         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2300         struct page *page = rx_pg->page;
2301
2302         if (!page)
2303                 return;
2304
2305         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2306                        PCI_DMA_FROMDEVICE);
2307
2308         __free_page(page);
2309         rx_pg->page = NULL;
2310 }
2311
2312 static inline int
2313 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2314 {
2315         struct sk_buff *skb;
2316         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2317         dma_addr_t mapping;
2318         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2319         unsigned long align;
2320
2321         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2322         if (skb == NULL) {
2323                 return -ENOMEM;
2324         }
2325
2326         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2327                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2328
2329         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2330                 PCI_DMA_FROMDEVICE);
2331
2332         rx_buf->skb = skb;
2333         pci_unmap_addr_set(rx_buf, mapping, mapping);
2334
2335         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2336         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2337
2338         bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2339
2340         return 0;
2341 }
2342
2343 static int
2344 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2345 {
2346         struct status_block *sblk = bnapi->status_blk;
2347         u32 new_link_state, old_link_state;
2348         int is_set = 1;
2349
2350         new_link_state = sblk->status_attn_bits & event;
2351         old_link_state = sblk->status_attn_bits_ack & event;
2352         if (new_link_state != old_link_state) {
2353                 if (new_link_state)
2354                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2355                 else
2356                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2357         } else
2358                 is_set = 0;
2359
2360         return is_set;
2361 }
2362
2363 static void
2364 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2365 {
2366         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2367                 spin_lock(&bp->phy_lock);
2368                 bnx2_set_link(bp);
2369                 spin_unlock(&bp->phy_lock);
2370         }
2371         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2372                 bnx2_set_remote_link(bp);
2373
2374 }
2375
2376 static inline u16
2377 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2378 {
2379         u16 cons;
2380
2381         if (bnapi->int_num == 0)
2382                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2383         else
2384                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2385
2386         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2387                 cons++;
2388         return cons;
2389 }
2390
/* Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap each packet's head and fragments, free the skb, and wake the
 * TX queue if it was stopped and enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u16 hw_cons, sw_cons, sw_ring_cons;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				/* Account for the skipped last entry of
				 * the ring page (see bnx2_get_hw_tx_cons). */
				last_idx++;
			}
			/* Stop if not all BDs of this packet are done yet. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap one BD per page fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);

		/* Re-read: more completions may have arrived meanwhile. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the TX lock to avoid a race with xmit. */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2467
/* Return @count page buffers from the rx page ring's consumer side back
 * to the producer side so the hardware can reuse them (used when a
 * split/jumbo packet is dropped or its skb could not be completed).
 *
 * If @skb is non-NULL, its last page fragment is detached, re-mapped
 * for DMA, placed back into the first recycled consumer slot, and the
 * skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        dma_addr_t mapping;
        int i;
        u16 hw_prod = bnapi->rx_pg_prod, prod;
        u16 cons = bnapi->rx_pg_cons;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &bp->rx_pg_ring[prod];
                cons_rx_pg = &bp->rx_pg_ring[cons];
                cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                if (i == 0 && skb) {
                        struct page *page;
                        struct skb_shared_info *shinfo;

                        /* Strip the last page frag off the skb and hand it
                         * back to the ring instead of allocating a new page.
                         */
                        shinfo = skb_shinfo(skb);
                        shinfo->nr_frags--;
                        page = shinfo->frags[shinfo->nr_frags].page;
                        shinfo->frags[shinfo->nr_frags].page = NULL;
                        /* NOTE(review): the pci_map_page() result is not
                         * checked for a mapping error here — confirm this
                         * is acceptable on all platforms.
                         */
                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                        cons_rx_pg->page = page;
                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
                        dev_kfree_skb(skb);
                }
                if (prod != cons) {
                        /* Move the page, its DMA mapping, and the hardware
                         * descriptor address from the consumer slot to the
                         * producer slot.
                         */
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        bnapi->rx_pg_prod = hw_prod;
        bnapi->rx_pg_cons = cons;
}
2517
2518 static inline void
2519 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2520         u16 cons, u16 prod)
2521 {
2522         struct sw_bd *cons_rx_buf, *prod_rx_buf;
2523         struct rx_bd *cons_bd, *prod_bd;
2524
2525         cons_rx_buf = &bp->rx_buf_ring[cons];
2526         prod_rx_buf = &bp->rx_buf_ring[prod];
2527
2528         pci_dma_sync_single_for_device(bp->pdev,
2529                 pci_unmap_addr(cons_rx_buf, mapping),
2530                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2531
2532         bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2533
2534         prod_rx_buf->skb = skb;
2535
2536         if (cons == prod)
2537                 return;
2538
2539         pci_unmap_addr_set(prod_rx_buf, mapping,
2540                         pci_unmap_addr(cons_rx_buf, mapping));
2541
2542         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2543         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2544         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2545         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2546 }
2547
/* Finish building the skb for a received frame after a replacement rx
 * buffer has been allocated.  For split/jumbo frames (@hdr_len != 0)
 * the tail of the frame is attached as page fragments taken from the
 * rx page ring.  @ring_idx packs the consumer index in the high 16
 * bits and the producer index in the low 16 bits.
 *
 * Returns 0 on success, or a negative errno when buffers could not be
 * replenished; in that case the packet's buffers are recycled and the
 * packet is dropped by the caller.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, bnapi, prod);
        if (unlikely(err)) {
                /* No replacement skb: recycle this buffer and any page
                 * frags the split frame would have consumed.
                 */
                bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* @len excludes the 4 trailing bytes (FCS,
                         * presumably — see len -= 4 in bnx2_rx_int());
                         * add them back to size the page count the same
                         * way the receive path did.
                         */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, bp->rx_offset);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Whole frame fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = bnapi->rx_pg_cons;
                u16 pg_prod = bnapi->rx_pg_prod;

                /* Bytes remaining after the linear header portion. */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        if (unlikely(frag_len <= 4)) {
                                /* Only (part of) the 4-byte trailer remains;
                                 * trim it from the skb and recycle the
                                 * untouched pages instead of attaching them.
                                 */
                                unsigned int tail = 4 - frag_len;

                                bnapi->rx_pg_cons = pg_cons;
                                bnapi->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &bp->rx_pg_ring[pg_cons];

                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        /* The last fragment carries the 4-byte trailer;
                         * don't include it in the skb length.
                         */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                /* Page ring replenish failed: restore the
                                 * indices, recycle the remaining pages (and
                                 * the frag just attached to @skb), and report
                                 * the error so the packet is dropped.
                                 */
                                bnapi->rx_pg_cons = pg_cons;
                                bnapi->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
                                                        pages - i);
                                return err;
                        }

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                bnapi->rx_pg_prod = pg_prod;
                bnapi->rx_pg_cons = pg_cons;
        }
        return 0;
}
2639
2640 static inline u16
2641 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2642 {
2643         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2644
2645         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2646                 cons++;
2647         return cons;
2648 }
2649
/* NAPI receive handler: drain completed rx descriptors, passing up to
 * @budget packets to the stack.  Small frames are copied into a fresh
 * skb and the original buffer recycled; larger ones keep the original
 * buffer (plus page-ring frags for split/jumbo frames).  Returns the
 * number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = bnapi->rx_cons;
        sw_prod = bnapi->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Sync only the header area for the CPU; the buffer is
                 * fully unmapped later only if we keep this skb.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

                /* The chip places an l2_fhdr status header at the start
                 * of the buffer, before the frame data.
                 */
                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        /* Errored frame: recycle the buffer, don't pass it
                         * up the stack.
                         */
                        bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
                                          sw_ring_prod);
                        goto next_rx;
                }
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        /* NOTE(review): for split frames the chip reports
                         * the header length in the ip_xsum field — per this
                         * driver's convention; confirm against the l2_fhdr
                         * hardware definition.
                         */
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* l2_fhdr_pkt_len includes 4 trailing bytes (FCS,
                 * presumably) that are not delivered to the stack.
                 */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        /* Small frame: copy it into a fresh skb and keep
                         * the original buffer on the ring.
                         */
                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, bnapi, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless they carry an 802.1Q VLAN
                 * tag (ethertype 0x8100), which accounts for the extra
                 * bytes.
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        /* Trust the hardware checksum only when it reports
                         * no TCP/UDP checksum errors.
                         */
                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        bnapi->rx_cons = sw_cons;
        bnapi->rx_prod = sw_prod;

        /* Publish the new producer indices and byte sequence to the chip. */
        if (pg_ring_used)
                REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
                         bnapi->rx_pg_prod);

        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
2795
2796 /* MSI ISR - The only difference between this and the INTx ISR
2797  * is that the MSI interrupt is always serviced.
2798  */
2799 static irqreturn_t
2800 bnx2_msi(int irq, void *dev_instance)
2801 {
2802         struct net_device *dev = dev_instance;
2803         struct bnx2 *bp = netdev_priv(dev);
2804         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2805
2806         prefetch(bnapi->status_blk);
2807         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2808                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2809                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2810
2811         /* Return here if interrupt is disabled. */
2812         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2813                 return IRQ_HANDLED;
2814
2815         netif_rx_schedule(dev, &bnapi->napi);
2816
2817         return IRQ_HANDLED;
2818 }
2819
2820 static irqreturn_t
2821 bnx2_msi_1shot(int irq, void *dev_instance)
2822 {
2823         struct net_device *dev = dev_instance;
2824         struct bnx2 *bp = netdev_priv(dev);
2825         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2826
2827         prefetch(bnapi->status_blk);
2828
2829         /* Return here if interrupt is disabled. */
2830         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2831                 return IRQ_HANDLED;
2832
2833         netif_rx_schedule(dev, &bnapi->napi);
2834
2835         return IRQ_HANDLED;
2836 }
2837
/* INTx (possibly shared) interrupt handler.  Returns IRQ_NONE when the
 * interrupt did not come from this device, IRQ_HANDLED otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct status_block *sblk = bnapi->status_blk;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Ack and mask the interrupt. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* last_status_idx is only advanced when the schedule succeeds,
         * so losing the race to an already-scheduled poll does not skip
         * a status block update.
         */
        if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bnapi->napi);
        }

        return IRQ_HANDLED;
}
2877
/* Attention events the driver services: link state changes and
 * firmware timer aborts.
 */
#define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
                                 STATUS_ATTN_BITS_TIMER_ABORT)
2880
2881 static inline int
2882 bnx2_has_work(struct bnx2_napi *bnapi)
2883 {
2884         struct bnx2 *bp = bnapi->bp;
2885         struct status_block *sblk = bp->status_blk;
2886
2887         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2888             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2889                 return 1;
2890
2891         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2892             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2893                 return 1;
2894
2895         return 0;
2896 }
2897
/* Run one round of NAPI work: service pending attention events (link
 * changes etc.), reap completed tx buffers, then receive up to
 * (budget - work_done) rx packets.  Returns the updated work_done
 * count; only rx packets count against the budget.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
        struct status_block *sblk = bnapi->status_blk;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        /* An attention event is pending when the event bits and their
         * acknowledge bits disagree.
         */
        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                /* Read back — presumably to flush the posted write. */
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
                bnx2_tx_int(bp, bnapi);

        if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

        return work_done;
}
2926
/* NAPI poll callback.  Loops servicing work until either the budget is
 * consumed (stay scheduled) or no work remains, in which case polling
 * is completed and the interrupt is re-armed by writing the last seen
 * status index back to the chip.  Returns the amount of work done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

                if (unlikely(work_done >= budget))
                        break;

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        netif_rx_complete(bp->dev, napi);
                        /* MSI/MSI-X: a single ack with the new index
                         * re-enables the interrupt.
                         */
                        if (likely(bp->flags & USING_MSI_OR_MSIX_FLAG)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: ack once with the interrupt still masked,
                         * then ack again unmasked.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
2968
2969 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2970  * from set_multicast.
2971  */
2972 static void
2973 bnx2_set_rx_mode(struct net_device *dev)
2974 {
2975         struct bnx2 *bp = netdev_priv(dev);
2976         u32 rx_mode, sort_mode;
2977         int i;
2978
2979         spin_lock_bh(&bp->phy_lock);
2980
2981         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2982                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2983         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2984 #ifdef BCM_VLAN
2985         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2986                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2987 #else
2988         if (!(bp->flags & ASF_ENABLE_FLAG))
2989                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2990 #endif
2991         if (dev->flags & IFF_PROMISC) {
2992                 /* Promiscuous mode. */
2993                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2994                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2995                              BNX2_RPM_SORT_USER0_PROM_VLAN;
2996         }
2997         else if (dev->flags & IFF_ALLMULTI) {
2998                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2999                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3000                                0xffffffff);
3001                 }
3002                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3003         }
3004         else {
3005                 /* Accept one or more multicast(s). */
3006                 struct dev_mc_list *mclist;
3007                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3008                 u32 regidx;
3009                 u32 bit;
3010                 u32 crc;
3011
3012                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3013
3014                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3015                      i++, mclist = mclist->next) {
3016
3017                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3018                         bit = crc & 0xff;
3019                         regidx = (bit & 0xe0) >> 5;
3020                         bit &= 0x1f;
3021                         mc_filter[regidx] |= (1 << bit);
3022                 }
3023
3024                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3025                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3026                                mc_filter[i]);
3027                 }
3028
3029                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3030         }
3031
3032         if (rx_mode != bp->rx_mode) {
3033                 bp->rx_mode = rx_mode;
3034                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3035         }
3036
3037         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3038         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3039         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3040
3041         spin_unlock_bh(&bp->phy_lock);
3042 }
3043
3044 static void
3045 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3046         u32 rv2p_proc)
3047 {
3048         int i;
3049         u32 val;
3050
3051
3052         for (i = 0; i < rv2p_code_len; i += 8) {
3053                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
3054                 rv2p_code++;
3055                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
3056                 rv2p_code++;
3057
3058                 if (rv2p_proc == RV2P_PROC1) {
3059                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3060                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3061                 }
3062                 else {
3063                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3064                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3065                 }
3066         }
3067
3068         /* Reset the processor, un-stall is done later. */
3069         if (rv2p_proc == RV2P_PROC1) {
3070                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3071         }
3072         else {
3073                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3074         }
3075 }
3076
3077 static int
3078 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3079 {
3080         u32 offset;
3081         u32 val;
3082         int rc;
3083
3084         /* Halt the CPU. */
3085         val = REG_RD_IND(bp, cpu_reg->mode);
3086         val |= cpu_reg->mode_value_halt;
3087         REG_WR_IND(bp, cpu_reg->mode, val);
3088         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3089
3090         /* Load the Text area. */
3091         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3092         if (fw->gz_text) {
3093                 int j;
3094
3095                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3096                                        fw->gz_text_len);
3097                 if (rc < 0)
3098                         return rc;
3099
3100                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3101                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3102                 }
3103         }
3104
3105         /* Load the Data area. */
3106         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3107         if (fw->data) {
3108                 int j;
3109
3110                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3111                         REG_WR_IND(bp, offset, fw->data[j]);
3112                 }
3113         }
3114
3115         /* Load the SBSS area. */
3116         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3117         if (fw->sbss_len) {
3118                 int j;
3119
3120                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3121                         REG_WR_IND(bp, offset, 0);
3122                 }
3123         }
3124
3125         /* Load the BSS area. */
3126         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3127         if (fw->bss_len) {
3128                 int j;
3129
3130                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3131                         REG_WR_IND(bp, offset, 0);
3132                 }
3133         }
3134
3135         /* Load the Read-Only area. */
3136         offset = cpu_reg->spad_base +
3137                 (fw->rodata_addr - cpu_reg->mips_view_base);
3138         if (fw->rodata) {
3139                 int j;
3140
3141                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3142                         REG_WR_IND(bp, offset, fw->rodata[j]);
3143                 }
3144         }
3145
3146         /* Clear the pre-fetch instruction. */
3147         REG_WR_IND(bp, cpu_reg->inst, 0);
3148         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3149
3150         /* Start the CPU. */
3151         val = REG_RD_IND(bp, cpu_reg->mode);
3152         val &= ~cpu_reg->mode_value_halt;
3153         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3154         REG_WR_IND(bp, cpu_reg->mode, val);
3155
3156         return 0;
3157 }
3158
3159 static int
3160 bnx2_init_cpus(struct bnx2 *bp)
3161 {
3162         struct cpu_reg cpu_reg;
3163         struct fw_info *fw;
3164         int rc, rv2p_len;
3165         void *text, *rv2p;
3166
3167         /* Initialize the RV2P processor. */
3168         text = vmalloc(FW_BUF_SIZE);
3169         if (!text)
3170                 return -ENOMEM;
3171         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3172                 rv2p = bnx2_xi_rv2p_proc1;
3173                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3174         } else {
3175                 rv2p = bnx2_rv2p_proc1;
3176                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3177         }
3178         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3179         if (rc < 0)
3180                 goto init_cpu_err;
3181
3182         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3183
3184         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3185                 rv2p = bnx2_xi_rv2p_proc2;
3186                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3187         } else {
3188                 rv2p = bnx2_rv2p_proc2;
3189                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3190         }
3191         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3192         if (rc < 0)
3193                 goto init_cpu_err;
3194
3195         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3196
3197         /* Initialize the RX Processor. */
3198         cpu_reg.mode = BNX2_RXP_CPU_MODE;
3199         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3200         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3201         cpu_reg.state = BNX2_RXP_CPU_STATE;
3202         cpu_reg.state_value_clear = 0xffffff;
3203         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3204         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3205         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3206         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3207         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3208         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3209         cpu_reg.mips_view_base = 0x8000000;
3210
3211         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3212                 fw = &bnx2_rxp_fw_09;
3213         else
3214                 fw = &bnx2_rxp_fw_06;
3215
3216         fw->text = text;
3217         rc = load_cpu_fw(bp, &cpu_reg, fw);
3218         if (rc)
3219                 goto init_cpu_err;
3220
3221         /* Initialize the TX Processor. */
3222         cpu_reg.mode = BNX2_TXP_CPU_MODE;
3223         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3224         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3225         cpu_reg.state = BNX2_TXP_CPU_STATE;
3226         cpu_reg.state_value_clear = 0xffffff;
3227         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3228         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3229         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3230         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3231         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3232         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3233         cpu_reg.mips_view_base = 0x8000000;
3234
3235         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3236                 fw = &bnx2_txp_fw_09;
3237         else
3238                 fw = &bnx2_txp_fw_06;
3239
3240         fw->text = text;
3241         rc = load_cpu_fw(bp, &cpu_reg, fw);
3242         if (rc)
3243                 goto init_cpu_err;
3244
3245         /* Initialize the TX Patch-up Processor. */
3246         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3247         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3248         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3249         cpu_reg.state = BNX2_TPAT_CPU_STATE;
3250         cpu_reg.state_value_clear = 0xffffff;
3251         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3252         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3253         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3254         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3255         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3256         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3257         cpu_reg.mips_view_base = 0x8000000;
3258
3259         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3260                 fw = &bnx2_tpat_fw_09;
3261         else
3262                 fw = &bnx2_tpat_fw_06;
3263
3264         fw->text = text;
3265         rc = load_cpu_fw(bp, &cpu_reg, fw);
3266         if (rc)
3267                 goto init_cpu_err;
3268
3269         /* Initialize the Completion Processor. */
3270         cpu_reg.mode = BNX2_COM_CPU_MODE;
3271         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3272         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3273         cpu_reg.state = BNX2_COM_CPU_STATE;
3274         cpu_reg.state_value_clear = 0xffffff;
3275         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3276         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3277         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3278         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3279         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3280         cpu_reg.spad_base = BNX2_COM_SCRATCH;
3281         cpu_reg.mips_view_base = 0x8000000;
3282
3283         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3284                 fw = &bnx2_com_fw_09;
3285         else
3286                 fw = &bnx2_com_fw_06;
3287
3288         fw->text = text;
3289         rc = load_cpu_fw(bp, &cpu_reg, fw);
3290         if (rc)
3291                 goto init_cpu_err;
3292
3293         /* Initialize the Command Processor. */
3294         cpu_reg.mode = BNX2_CP_CPU_MODE;
3295         cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3296         cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3297         cpu_reg.state = BNX2_CP_CPU_STATE;
3298         cpu_reg.state_value_clear = 0xffffff;
3299         cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3300         cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3301         cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3302         cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3303         cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3304         cpu_reg.spad_base = BNX2_CP_SCRATCH;
3305         cpu_reg.mips_view_base = 0x8000000;
3306
3307         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3308                 fw = &bnx2_cp_fw_09;
3309         else
3310                 fw = &bnx2_cp_fw_06;
3311
3312         fw->text = text;
3313         rc = load_cpu_fw(bp, &cpu_reg, fw);
3314
3315 init_cpu_err:
3316         vfree(text);
3317         return rc;
3318 }
3319
/* Transition the chip between PCI power states.
 *
 * PCI_D0: bring the device to full power, clear any pending PME status,
 * and disable the magic-packet wakeup mode set up for suspend.
 *
 * PCI_D3hot: optionally arm Wake-on-LAN (force 10/100 autoneg on copper,
 * program the MAC for magic/ACPI packet reception, accept all multicast),
 * notify the firmware of the suspend type, then write PMCSR to enter
 * D3hot.  No register access is allowed after the PMCSR write.
 *
 * Returns 0 on success, -EINVAL for any other target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (enter D0) and acknowledge
		 * any pending PME status in the same write. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wakeup-packet indications and turn
		 * magic-packet mode back off now that we are awake. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force low-speed autoneg on copper for
			 * WOL; the user settings are restored below. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort-user0 for broadcast + multicast,
			 * toggling it off, on, then enabled. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware which suspend variant we are entering. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* Build the new PMCSR value: state field 3 is D3hot.
		 * NOTE(review): on 5706 A0/A1 the state field is only set
		 * when WOL is enabled — presumably a chip erratum; confirm
		 * against the errata list before changing. */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3456
3457 static int
3458 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3459 {
3460         u32 val;
3461         int j;
3462
3463         /* Request access to the flash interface. */
3464         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3465         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3466                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3467                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3468                         break;
3469
3470                 udelay(5);
3471         }
3472
3473         if (j >= NVRAM_TIMEOUT_COUNT)
3474                 return -EBUSY;
3475
3476         return 0;
3477 }
3478
3479 static int
3480 bnx2_release_nvram_lock(struct bnx2 *bp)
3481 {
3482         int j;
3483         u32 val;
3484
3485         /* Relinquish nvram interface. */
3486         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3487
3488         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3489                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3490                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3491                         break;
3492
3493                 udelay(5);
3494         }
3495
3496         if (j >= NVRAM_TIMEOUT_COUNT)
3497                 return -EBUSY;
3498
3499         return 0;
3500 }
3501
3502
3503 static int
3504 bnx2_enable_nvram_write(struct bnx2 *bp)
3505 {
3506         u32 val;
3507
3508         val = REG_RD(bp, BNX2_MISC_CFG);
3509         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3510
3511         if (bp->flash_info->flags & BNX2_NV_WREN) {
3512                 int j;
3513
3514                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3515                 REG_WR(bp, BNX2_NVM_COMMAND,
3516                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3517
3518                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3519                         udelay(5);
3520
3521                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3522                         if (val & BNX2_NVM_COMMAND_DONE)
3523                                 break;
3524                 }
3525
3526                 if (j >= NVRAM_TIMEOUT_COUNT)
3527                         return -EBUSY;
3528         }
3529         return 0;
3530 }
3531
3532 static void
3533 bnx2_disable_nvram_write(struct bnx2 *bp)
3534 {
3535         u32 val;
3536
3537         val = REG_RD(bp, BNX2_MISC_CFG);
3538         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3539 }
3540
3541
3542 static void
3543 bnx2_enable_nvram_access(struct bnx2 *bp)
3544 {
3545         u32 val;
3546
3547         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3548         /* Enable both bits, even on read. */
3549         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3550                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3551 }
3552
3553 static void
3554 bnx2_disable_nvram_access(struct bnx2 *bp)
3555 {
3556         u32 val;
3557
3558         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3559         /* Disable both bits, even after read. */
3560         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3561                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3562                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3563 }
3564
3565 static int
3566 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3567 {
3568         u32 cmd;
3569         int j;
3570
3571         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3572                 /* Buffered flash, no erase needed */
3573                 return 0;
3574
3575         /* Build an erase command */
3576         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3577               BNX2_NVM_COMMAND_DOIT;
3578
3579         /* Need to clear DONE bit separately. */
3580         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3581
3582         /* Address of the NVRAM to read from. */
3583         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3584
3585         /* Issue an erase command. */
3586         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3587
3588         /* Wait for completion. */
3589         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3590                 u32 val;
3591
3592                 udelay(5);
3593
3594                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3595                 if (val & BNX2_NVM_COMMAND_DONE)
3596                         break;
3597         }
3598
3599         if (j >= NVRAM_TIMEOUT_COUNT)
3600                 return -EBUSY;
3601
3602         return 0;
3603 }
3604
3605 static int
3606 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3607 {
3608         u32 cmd;
3609         int j;
3610
3611         /* Build the command word. */
3612         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3613
3614         /* Calculate an offset of a buffered flash, not needed for 5709. */
3615         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3616                 offset = ((offset / bp->flash_info->page_size) <<
3617                            bp->flash_info->page_bits) +
3618                           (offset % bp->flash_info->page_size);
3619         }
3620
3621         /* Need to clear DONE bit separately. */
3622         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3623
3624         /* Address of the NVRAM to read from. */
3625         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3626
3627         /* Issue a read command. */
3628         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3629
3630         /* Wait for completion. */
3631         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3632                 u32 val;
3633
3634                 udelay(5);
3635
3636                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3637                 if (val & BNX2_NVM_COMMAND_DONE) {
3638                         val = REG_RD(bp, BNX2_NVM_READ);
3639
3640                         val = be32_to_cpu(val);
3641                         memcpy(ret_val, &val, 4);
3642                         break;
3643                 }
3644         }
3645         if (j >= NVRAM_TIMEOUT_COUNT)
3646                 return -EBUSY;
3647
3648         return 0;
3649 }
3650
3651
3652 static int
3653 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3654 {
3655         u32 cmd, val32;
3656         int j;
3657
3658         /* Build the command word. */
3659         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3660
3661         /* Calculate an offset of a buffered flash, not needed for 5709. */
3662         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3663                 offset = ((offset / bp->flash_info->page_size) <<
3664                           bp->flash_info->page_bits) +
3665                          (offset % bp->flash_info->page_size);
3666         }
3667
3668         /* Need to clear DONE bit separately. */
3669         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3670
3671         memcpy(&val32, val, 4);
3672         val32 = cpu_to_be32(val32);
3673
3674         /* Write the data. */
3675         REG_WR(bp, BNX2_NVM_WRITE, val32);
3676
3677         /* Address of the NVRAM to write to. */
3678         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3679
3680         /* Issue the write command. */
3681         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3682
3683         /* Wait for completion. */
3684         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3685                 udelay(5);
3686
3687                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3688                         break;
3689         }
3690         if (j >= NVRAM_TIMEOUT_COUNT)
3691                 return -EBUSY;
3692
3693         return 0;
3694 }
3695
/* Identify the attached flash/EEPROM part and record it in bp->flash_info,
 * then determine the usable flash size.
 *
 * On 5709 the flash spec is fixed (flash_5709).  On older chips the part
 * is identified from the NVM_CFG1 straps, matched against flash_table[];
 * if the interface has not yet been reconfigured, the matching entry's
 * config registers are programmed under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set means the flash interface was already reconfigured
	 * (presumably by the boot code) — TODO confirm bit semantics
	 * against the register spec. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a match: j ran to entry_count. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVM size advertised in shared firmware config;
	 * fall back to the table entry's total size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3778
3779 static int
3780 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3781                 int buf_size)
3782 {
3783         int rc = 0;
3784         u32 cmd_flags, offset32, len32, extra;
3785
3786         if (buf_size == 0)
3787                 return 0;
3788
3789         /* Request access to the flash interface. */
3790         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3791                 return rc;
3792
3793         /* Enable access to flash interface */
3794         bnx2_enable_nvram_access(bp);
3795
3796         len32 = buf_size;
3797         offset32 = offset;
3798         extra = 0;
3799
3800         cmd_flags = 0;
3801
3802         if (offset32 & 3) {
3803                 u8 buf[4];
3804                 u32 pre_len;
3805
3806                 offset32 &= ~3;
3807                 pre_len = 4 - (offset & 3);
3808
3809                 if (pre_len >= len32) {
3810                         pre_len = len32;
3811                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3812                                     BNX2_NVM_COMMAND_LAST;
3813                 }
3814                 else {
3815                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3816                 }
3817
3818                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3819
3820                 if (rc)
3821                         return rc;
3822
3823                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3824
3825                 offset32 += 4;
3826                 ret_buf += pre_len;
3827                 len32 -= pre_len;
3828         }
3829         if (len32 & 3) {
3830                 extra = 4 - (len32 & 3);
3831                 len32 = (len32 + 4) & ~3;
3832         }
3833
3834         if (len32 == 4) {
3835                 u8 buf[4];
3836
3837                 if (cmd_flags)
3838                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3839                 else
3840                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3841                                     BNX2_NVM_COMMAND_LAST;
3842
3843                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3844
3845                 memcpy(ret_buf, buf, 4 - extra);
3846         }
3847         else if (len32 > 0) {
3848                 u8 buf[4];
3849
3850                 /* Read the first word. */
3851                 if (cmd_flags)
3852                         cmd_flags = 0;
3853                 else
3854                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3855
3856                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3857
3858                 /* Advance to the next dword. */
3859                 offset32 += 4;
3860                 ret_buf += 4;
3861                 len32 -= 4;
3862
3863                 while (len32 > 4 && rc == 0) {
3864                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3865
3866                         /* Advance to the next dword. */
3867                         offset32 += 4;
3868                         ret_buf += 4;
3869                         len32 -= 4;
3870                 }
3871
3872                 if (rc)
3873                         return rc;
3874
3875                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3876                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3877
3878                 memcpy(ret_buf, buf, 4 - extra);
3879         }
3880
3881         /* Disable access to flash interface */
3882         bnx2_disable_nvram_access(bp);
3883
3884         bnx2_release_nvram_lock(bp);
3885
3886         return rc;
3887 }
3888
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned head/tail bytes are handled by reading back the surrounding
 * dwords and merging into an aligned bounce buffer (align_buf).  The
 * write then proceeds page by page: for non-buffered flash the whole
 * page is read into flash_buffer, the page is erased, and the untouched
 * leading/trailing words are written back around the new data.
 *
 * NOTE(review): error paths taken inside the page loop (after
 * bnx2_acquire_nvram_lock succeeds) jump straight to nvram_write_end
 * without disabling flash access or releasing the NVRAM lock — confirm
 * whether that is intentional.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: read the dword we will partially overwrite. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: read the dword we will partially overwrite. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge head bytes + caller data + tail bytes into one aligned
	 * buffer so the write loop only ever deals with whole dwords. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized scratch buffer for the
	 * read-modify-write cycle (264 bytes covers the page). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4068
4069 static void
4070 bnx2_init_remote_phy(struct bnx2 *bp)
4071 {
4072         u32 val;
4073
4074         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4075         if (!(bp->phy_flags & PHY_SERDES_FLAG))
4076                 return;
4077
4078         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4079         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4080                 return;
4081
4082         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4083                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4084
4085                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4086                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4087                         bp->phy_port = PORT_FIBRE;
4088                 else
4089                         bp->phy_port = PORT_TP;
4090
4091                 if (netif_running(bp->dev)) {
4092                         u32 sig;
4093
4094                         if (val & BNX2_LINK_STATUS_LINK_UP) {
4095                                 bp->link_up = 1;
4096                                 netif_carrier_on(bp->dev);
4097                         } else {
4098                                 bp->link_up = 0;
4099                                 netif_carrier_off(bp->dev);
4100                         }
4101                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4102                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4103                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4104                                    sig);
4105                 }
4106         }
4107 }
4108
/* Select separate-window GRC addressing and point windows 2 and 3 at
 * the MSI-X vector table and PBA, respectively, so the host can reach
 * them through the register window.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4117
/* Soft-reset the chip.  reset_code is the BNX2_DRV_MSG_CODE_* reason
 * reported to the bootcode before and after the reset.  Returns 0 on
 * success or a negative errno if the reset or the firmware handshake
 * fails.  The statement order below is dictated by hardware/firmware
 * requirements and must not be changed casually.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 uses a dedicated software-reset command register;
		 * the read-back flushes the posted write before the delay.
		 */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Re-program window/swap config via PCI config space (not
		 * the memory-mapped register) right after the reset.
		 */
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		/* Still busy/requesting after ~100us of polling -> give up. */
		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* The reset may change the remote PHY configuration; re-probe it
	 * and reset the remote link defaults if the port type changed.
	 */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* Re-establish the MSI-X GRC window mappings after the reset. */
	if (bp->flags & USING_MSIX_FLAG)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4222
/* Bring the just-reset chip to an operational state: DMA configuration,
 * context memory, on-chip CPU firmware, host coalescing, and the RX
 * filter.  Finishes the WAIT2 handshake with the bootcode and enables
 * all blocks.  Returns 0 or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus the number of read/write channels. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented tuning bits inherited from the
	 * vendor driver; confirm against Broadcom documentation before
	 * changing.
	 */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TX DMA to a single channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable Relaxed Ordering bit in the PCI-X
	 * command register.
	 */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context init can proceed. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load and start the on-chip CPU firmware. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Kernel-bypass block size, with a HALT workaround on early 5709. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* Start every NAPI instance from status index 0. */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the host coalescing block where the status and statistics
	 * blocks live in host memory.
	 */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters; high halfword is the value
	 * used while interrupts are disabled.
	 */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer modes; only collect statistics. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	/* Extra status block and coalescing setup for the MSI-X TX vector. */
	if (bp->flags & USING_MSIX_FLAG) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		REG_WR(bp, BNX2_HC_SB_CONFIG_1,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP_1,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, BNX2_HC_TX_TICKS_1,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final handshake: tell the bootcode initialization is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable all blocks; the read-back flushes the posted write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host coalescing command register for later use. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4420
4421 static void
4422 bnx2_clear_ring_states(struct bnx2 *bp)
4423 {
4424         struct bnx2_napi *bnapi;
4425         int i;
4426
4427         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4428                 bnapi = &bp->bnx2_napi[i];
4429
4430                 bnapi->tx_cons = 0;
4431                 bnapi->hw_tx_cons = 0;
4432                 bnapi->rx_prod_bseq = 0;
4433                 bnapi->rx_prod = 0;
4434                 bnapi->rx_cons = 0;
4435                 bnapi->rx_pg_prod = 0;
4436                 bnapi->rx_pg_cons = 0;
4437         }
4438 }
4439
4440 static void
4441 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4442 {
4443         u32 val, offset0, offset1, offset2, offset3;
4444
4445         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4446                 offset0 = BNX2_L2CTX_TYPE_XI;
4447                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4448                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4449                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4450         } else {
4451                 offset0 = BNX2_L2CTX_TYPE;
4452                 offset1 = BNX2_L2CTX_CMD_TYPE;
4453                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4454                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4455         }
4456         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4457         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4458
4459         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4460         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4461
4462         val = (u64) bp->tx_desc_mapping >> 32;
4463         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4464
4465         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4466         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4467 }
4468
4469 static void
4470 bnx2_init_tx_ring(struct bnx2 *bp)
4471 {
4472         struct tx_bd *txbd;
4473         u32 cid = TX_CID;
4474         struct bnx2_napi *bnapi;
4475
4476         bp->tx_vec = 0;
4477         if (bp->flags & USING_MSIX_FLAG) {
4478                 cid = TX_TSS_CID;
4479                 bp->tx_vec = BNX2_TX_VEC;
4480                 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4481                        (TX_TSS_CID << 7));
4482         }
4483         bnapi = &bp->bnx2_napi[bp->tx_vec];
4484
4485         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4486
4487         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4488
4489         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4490         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4491
4492         bp->tx_prod = 0;
4493         bp->tx_prod_bseq = 0;
4494
4495         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4496         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4497
4498         bnx2_init_tx_context(bp, cid);
4499 }
4500
4501 static void
4502 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4503                      int num_rings)
4504 {
4505         int i;
4506         struct rx_bd *rxbd;
4507
4508         for (i = 0; i < num_rings; i++) {
4509                 int j;
4510
4511                 rxbd = &rx_ring[i][0];
4512                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4513                         rxbd->rx_bd_len = buf_size;
4514                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4515                 }
4516                 if (i == (num_rings - 1))
4517                         j = 0;
4518                 else
4519                         j = i + 1;
4520                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4521                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4522         }
4523 }
4524
/* Set up the RX rings: initialize the BD pages, program the RX L2
 * context (including the optional page ring for jumbo frames), fill
 * the rings with buffers, and publish the initial producer indices to
 * the chip through the mailbox.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	/* Default: page ring disabled (buffer size 0). */
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo mode: set up the page ring and point the context
		 * at its first page.
		 */
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* RX context type and first page address of the normal ring. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the page ring; stop early on allocation failure. */
	ring_prod = prod = bnapi->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bnapi->rx_pg_prod = prod;

	/* Fill the normal RX ring with skbs; stop early on failure. */
	ring_prod = prod = bnapi->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bnapi->rx_prod = prod;

	/* Publish the producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
		 bnapi->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4592
4593 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4594 {
4595         u32 max, num_rings = 1;
4596
4597         while (ring_size > MAX_RX_DESC_CNT) {
4598                 ring_size -= MAX_RX_DESC_CNT;
4599                 num_rings++;
4600         }
4601         /* round to next power of 2 */
4602         max = max_size;
4603         while ((max & num_rings) == 0)
4604                 max >>= 1;
4605
4606         if (num_rings != max)
4607                 max <<= 1;
4608
4609         return max;
4610 }
4611
/* Compute all RX buffer and ring sizing derived from the device MTU
 * and the requested ring size.  If a full frame would not fit in one
 * page's worth of skb, switch to split-buffer mode: a small linear
 * buffer per packet plus page-sized chunks on the page ("jumbo") ring.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

	/* Full skb footprint: aligned data area plus skb_shared_info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if (rx_space > PAGE_SIZE) {
		/* NOTE(review): the "- 40" presumably discounts header
		 * bytes that stay in the linear buffer -- confirm against
		 * the RX path before changing.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Split mode: only a small header buffer per skb, and
		 * copybreak is disabled.
		 */
		rx_size = RX_COPY_THRESH + bp->rx_offset;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4650
/* Unmap and free every skb still owned by the TX ring.  A packet's
 * first BD maps the linear data and the following nr_frags BDs map its
 * page fragments, so the index advances by the whole packet at once.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from the subsequent BDs. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past this packet's BDs (head + frags). */
		i += j + 1;
	}

}
4687
4688 static void
4689 bnx2_free_rx_skbs(struct bnx2 *bp)
4690 {
4691         int i;
4692
4693         if (bp->rx_buf_ring == NULL)
4694                 return;
4695
4696         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4697                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4698                 struct sk_buff *skb = rx_buf->skb;
4699
4700                 if (skb == NULL)
4701                         continue;
4702
4703                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4704                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4705
4706                 rx_buf->skb = NULL;
4707
4708                 dev_kfree_skb(skb);
4709         }
4710         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4711                 bnx2_free_rx_page(bp, i);
4712 }
4713
/* Release all driver-owned TX and RX buffers; used after a chip reset
 * (see bnx2_reset_nic) when the hardware no longer owns the rings.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4720
/* Reset the chip and rebuild all rings.  Note that the buffers are
 * freed even when the reset itself fails, so no skbs are leaked on
 * the error path.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_clear_ring_states(bp);
	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
4739
/* Full (re)initialization: reset the NIC and rings, then bring up the
 * PHY and link state under phy_lock.  Returns 0 or a negative errno
 * from the reset path.
 */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
4754
/* Ethtool register self-test.  For each table entry, write 0 and then
 * 0xffffffff to the register and verify that the read/write bits
 * (rw_mask) take the written value while the read-only bits (ro_mask)
 * keep their saved value.  The original register contents are restored
 * in every case.  Returns 0 on success, -ENODEV on the first
 * miscompare.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
/* Entry applies to 5706/5708 only; skipped on 5709. */
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Terminator. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read 1, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure before bailing out. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4925
4926 static int
4927 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4928 {
4929         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4930                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4931         int i;
4932
4933         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4934                 u32 offset;
4935
4936                 for (offset = 0; offset < size; offset += 4) {
4937
4938                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4939
4940                         if (REG_RD_IND(bp, start + offset) !=
4941                                 test_pattern[i]) {
4942                                 return -ENODEV;
4943                         }
4944                 }
4945         }
4946         return 0;
4947 }
4948
4949 static int
4950 bnx2_test_memory(struct bnx2 *bp)
4951 {
4952         int ret = 0;
4953         int i;
4954         static struct mem_entry {
4955                 u32   offset;
4956                 u32   len;
4957         } mem_tbl_5706[] = {
4958                 { 0x60000,  0x4000 },
4959                 { 0xa0000,  0x3000 },
4960                 { 0xe0000,  0x4000 },
4961                 { 0x120000, 0x4000 },
4962                 { 0x1a0000, 0x4000 },
4963                 { 0x160000, 0x4000 },
4964                 { 0xffffffff, 0    },
4965         },
4966         mem_tbl_5709[] = {
4967                 { 0x60000,  0x4000 },
4968                 { 0xa0000,  0x3000 },
4969                 { 0xe0000,  0x4000 },
4970                 { 0x120000, 0x4000 },
4971                 { 0x1a0000, 0x4000 },
4972                 { 0xffffffff, 0    },
4973         };
4974         struct mem_entry *mem_tbl;
4975
4976         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4977                 mem_tbl = mem_tbl_5709;
4978         else
4979                 mem_tbl = mem_tbl_5706;
4980
4981         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4982                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4983                         mem_tbl[i].len)) != 0) {
4984                         return ret;
4985                 }
4986         }
4987
4988         return ret;
4989 }
4990
4991 #define BNX2_MAC_LOOPBACK       0
4992 #define BNX2_PHY_LOOPBACK       1
4993
/* Send one self-addressed frame through the requested loopback path
 * (MAC-internal or PHY-internal) and verify it arrives back on the
 * RX ring intact.  Returns 0 on success, -EINVAL for an unknown
 * mode, -ENOMEM on skb allocation failure, -ENODEV when the frame
 * is lost or corrupted.  Returns 0 without testing when a remote
 * PHY manages the link (PHY loopback not applicable there).
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;

	tx_napi = bnapi;
	/* With MSI-X, TX completions are reported on a separate vector. */
	if (bp->flags & USING_MSIX_FLAG)
		tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the test frame below the jumbo threshold (minus 4 bytes
	 * of CRC) so it fits in a single RX buffer.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = our own address; payload is a counting
	 * pattern that is verified byte-for-byte on receive.
	 */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so we capture a stable RX
	 * consumer index to compare against after the frame loops back.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* One descriptor covers the whole frame (START and END set). */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell to start transmission. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so the TX/RX completion indices get updated. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX side must have consumed our single descriptor. */
	if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
		goto loopback_test_done;

	/* Exactly num_pkts frames must have arrived on the RX ring. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written L2 frame header precedes the packet data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5119
5120 #define BNX2_MAC_LOOPBACK_FAILED        1
5121 #define BNX2_PHY_LOOPBACK_FAILED        2
5122 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5123                                          BNX2_PHY_LOOPBACK_FAILED)
5124
5125 static int
5126 bnx2_test_loopback(struct bnx2 *bp)
5127 {
5128         int rc = 0;
5129
5130         if (!netif_running(bp->dev))
5131                 return BNX2_LOOPBACK_FAILED;
5132
5133         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5134         spin_lock_bh(&bp->phy_lock);
5135         bnx2_init_phy(bp);
5136         spin_unlock_bh(&bp->phy_lock);
5137         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5138                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5139         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5140                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5141         return rc;
5142 }
5143
5144 #define NVRAM_SIZE 0x200
5145 #define CRC32_RESIDUAL 0xdebb20e3
5146
5147 static int
5148 bnx2_test_nvram(struct bnx2 *bp)
5149 {
5150         u32 buf[NVRAM_SIZE / 4];
5151         u8 *data = (u8 *) buf;
5152         int rc = 0;
5153         u32 magic, csum;
5154
5155         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5156                 goto test_nvram_done;
5157
5158         magic = be32_to_cpu(buf[0]);
5159         if (magic != 0x669955aa) {
5160                 rc = -ENODEV;
5161                 goto test_nvram_done;
5162         }
5163
5164         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5165                 goto test_nvram_done;
5166
5167         csum = ether_crc_le(0x100, data);
5168         if (csum != CRC32_RESIDUAL) {
5169                 rc = -ENODEV;
5170                 goto test_nvram_done;
5171         }
5172
5173         csum = ether_crc_le(0x100, data + 0x100);
5174         if (csum != CRC32_RESIDUAL) {
5175                 rc = -ENODEV;
5176         }
5177
5178 test_nvram_done:
5179         return rc;
5180 }
5181
5182 static int
5183 bnx2_test_link(struct bnx2 *bp)
5184 {
5185         u32 bmsr;
5186
5187         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5188                 if (bp->link_up)
5189                         return 0;
5190                 return -ENODEV;
5191         }
5192         spin_lock_bh(&bp->phy_lock);
5193         bnx2_enable_bmsr1(bp);
5194         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5195         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5196         bnx2_disable_bmsr1(bp);
5197         spin_unlock_bh(&bp->phy_lock);
5198
5199         if (bmsr & BMSR_LSTATUS) {
5200                 return 0;
5201         }
5202         return -ENODEV;
5203 }
5204
5205 static int
5206 bnx2_test_intr(struct bnx2 *bp)
5207 {
5208         int i;
5209         u16 status_idx;
5210
5211         if (!netif_running(bp->dev))
5212                 return -ENODEV;
5213
5214         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5215
5216         /* This register is not touched during run-time. */
5217         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5218         REG_RD(bp, BNX2_HC_COMMAND);
5219
5220         for (i = 0; i < 10; i++) {
5221                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5222                         status_idx) {
5223
5224                         break;
5225                 }
5226
5227                 msleep_interruptible(10);
5228         }
5229         if (i < 10)
5230                 return 0;
5231
5232         return -ENODEV;
5233 }
5234
/* Per-tick timer work for the 5706 SerDes PHY (parallel-detect
 * workaround): when autoneg has not brought the link up but a signal
 * is present without autoneg configuration from the partner, force
 * 1000 Mb full duplex; once configuration words reappear, re-enable
 * autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Registers 0x1c/0x17/0x15 are vendor-specific
			 * select/status accesses — presumably shadow
			 * registers; confirm against the PHY datasheet.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			/* The select+read pair is issued twice; the second
			 * read is the one that is tested below.
			 */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1000 Mb full duplex and remember that
				 * we parallel-detected.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up in forced mode: if the partner now sends
		 * autoneg configuration, return to autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5289
/* Per-tick timer work for the 5708 SerDes PHY.  On 2.5G-capable
 * parts, when autoneg has not brought the link up, alternate between
 * forced 2.5G mode and autoneg until the link comes up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to do when the PHY is managed remotely. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not complete: try forced 2.5G. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either: back to autoneg
			 * and skip the next two timer ticks before
			 * re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5322
5323 static void
5324 bnx2_timer(unsigned long data)
5325 {
5326         struct bnx2 *bp = (struct bnx2 *) data;
5327
5328         if (!netif_running(bp->dev))
5329                 return;
5330
5331         if (atomic_read(&bp->intr_sem) != 0)
5332                 goto bnx2_restart_timer;
5333
5334         bnx2_send_heart_beat(bp);
5335
5336         bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5337
5338         /* workaround occasional corrupted counters */
5339         if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5340                 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5341                                             BNX2_HC_COMMAND_STATS_NOW);
5342
5343         if (bp->phy_flags & PHY_SERDES_FLAG) {
5344                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5345                         bnx2_5706_serdes_timer(bp);
5346                 else
5347                         bnx2_5708_serdes_timer(bp);
5348         }
5349
5350 bnx2_restart_timer:
5351         mod_timer(&bp->timer, jiffies + bp->current_interval);
5352 }
5353
5354 static int
5355 bnx2_request_irq(struct bnx2 *bp)
5356 {
5357         struct net_device *dev = bp->dev;
5358         unsigned long flags;
5359         struct bnx2_irq *irq;
5360         int rc = 0, i;
5361
5362         if (bp->flags & USING_MSI_OR_MSIX_FLAG)
5363                 flags = 0;
5364         else
5365                 flags = IRQF_SHARED;
5366
5367         for (i = 0; i < bp->irq_nvecs; i++) {
5368                 irq = &bp->irq_tbl[i];
5369                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5370                                  dev);
5371                 if (rc)
5372                         break;
5373                 irq->requested = 1;
5374         }
5375         return rc;
5376 }
5377
5378 static void
5379 bnx2_free_irq(struct bnx2 *bp)
5380 {
5381         struct net_device *dev = bp->dev;
5382         struct bnx2_irq *irq;
5383         int i;
5384
5385         for (i = 0; i < bp->irq_nvecs; i++) {
5386                 irq = &bp->irq_tbl[i];
5387                 if (irq->requested)
5388                         free_irq(irq->vector, dev);
5389                 irq->requested = 0;
5390         }
5391         if (bp->flags & USING_MSI_FLAG)
5392                 pci_disable_msi(bp->pdev);
5393         else if (bp->flags & USING_MSIX_FLAG)
5394                 pci_disable_msix(bp->pdev);
5395
5396         bp->flags &= ~(USING_MSI_OR_MSIX_FLAG | ONE_SHOT_MSI_FLAG);
5397 }
5398
/* Program the MSI-X setup into the device: vector table, vector
 * count, and the table/PBA offsets pointed at GRC windows 2 and 3.
 * NOTE(review): the exact meaning of the TBL_OFF_BIR/PBA_OFF_BIT
 * values is taken on trust from bnx2.h — confirm against the 5709
 * programming reference.
 */
static void
bnx2_enable_msix(struct bnx2 *bp)
{
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
}
5407
5408 static void
5409 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5410 {
5411         bp->irq_tbl[0].handler = bnx2_interrupt;
5412         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5413         bp->irq_nvecs = 1;
5414         bp->irq_tbl[0].vector = bp->pdev->irq;
5415
5416         if ((bp->flags & MSIX_CAP_FLAG) && !dis_msi)
5417                 bnx2_enable_msix(bp);
5418
5419         if ((bp->flags & MSI_CAP_FLAG) && !dis_msi &&
5420             !(bp->flags & USING_MSIX_FLAG)) {
5421                 if (pci_enable_msi(bp->pdev) == 0) {
5422                         bp->flags |= USING_MSI_FLAG;
5423                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5424                                 bp->flags |= ONE_SHOT_MSI_FLAG;
5425                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5426                         } else
5427                                 bp->irq_tbl[0].handler = bnx2_msi;
5428
5429                         bp->irq_tbl[0].vector = bp->pdev->irq;
5430                 }
5431         }
5432 }
5433
/* net_device open entry point; called with rtnl_lock held.
 * Allocates ring memory, picks the interrupt mode (INTx/MSI/MSI-X),
 * requests IRQs, initializes the chip and starts the maintenance
 * timer.  If MSI turns out to be non-functional, tears down and
 * retries in INTx mode.  Returns 0 or a negative errno, undoing all
 * setup on failure.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1: force plain INTx on the retry. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5515
/* Deferred-work handler (scheduled from bnx2_tx_timeout): quiesces
 * the interface, reinitializes the chip, and restarts it.  The
 * in_reset_task flag lets bnx2_close() busy-wait for this work
 * instead of calling flush_scheduled_work(), which could deadlock
 * on rtnl_lock.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* intr_sem is set non-zero before restart — presumably
	 * bnx2_netif_start() re-enables interrupts; confirm there.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5533
5534 static void
5535 bnx2_tx_timeout(struct net_device *dev)
5536 {
5537         struct bnx2 *bp = netdev_priv(dev);
5538
5539         /* This allows the netif to be shutdown gracefully before resetting */
5540         schedule_work(&bp->reset_task);
5541 }
5542
5543 #ifdef BCM_VLAN
/* Called with rtnl_lock.
 * Install the VLAN group while the interface is quiesced so that
 * bp->vlgrp and the RX configuration written by bnx2_set_rx_mode()
 * change atomically with respect to traffic.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5557 #endif
5558
5559 /* Called with netif_tx_lock.
5560  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5561  * netif_wake_queue().
5562  */
/* Hard-start transmit.  Maps the skb (linear head plus page frags)
 * for DMA, builds one TX BD per segment carrying checksum/VLAN/TSO
 * flags, and rings the TX doorbell.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY if the ring unexpectedly lacks room (the queue
 * should already have been stopped in that case).
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	/* The queue is stopped before the ring can fill, so running out
	 * of descriptors here indicates a ring-accounting bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: encode the TCP header offset
			 * (relative to a standard IPv6 header) into the
			 * BD flag/mss fields.  NOTE(review): the 3-bit
			 * shift and the OFF0/OFF2/OFF4 field split follow
			 * the BD layout in bnx2.h — confirm against it.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TSO over IPv4: the IP/TCP headers are modified
			 * in place, so a cloned header area must be
			 * unshared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed the TCP pseudo-header checksum (length 0)
			 * and the per-segment IP total length.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Report IP and TCP option words to the chip. */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First BD covers the linear part and carries the START flag;
	 * the skb pointer is kept only on this first slot.
	 */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* The last BD of the frame gets the END flag. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the TX doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when it gets nearly full; immediately re-wake
	 * if completions freed enough room in the meantime (closes the
	 * race against bnx2_tx_int()).
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5699
/* net_device stop entry point; called with rtnl_lock held.  Waits
 * for any in-flight reset task, quiesces interrupts/NAPI/timer,
 * resets the chip with a firmware code matching the wake-on-LAN
 * configuration, then frees IRQs, buffers and ring memory and puts
 * the device into D3hot.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset code based on WoL configuration. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5732
/* Combine the hi/lo halves of a 64-bit statistics-block counter into
 * one unsigned long (64-bit kernels), or take just the low half
 * (32-bit kernels).  The whole expansion is parenthesized so the
 * macro behaves as a single expression when the caller combines it
 * with other operators.
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) (ctr##_hi) << 32) +			\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5745
/* Fill and return the driver's net_device_stats from the DMA'd
 * statistics block.  Returns the cached structure unchanged when the
 * statistics block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* GET_NET_STATS merges the hi/lo counter halves on 64-bit
	 * kernels and takes only the low half on 32-bit kernels.
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* NOTE(review): carrier sense errors are forced to 0 on 5706
	 * and 5708 A0 — presumably a chip erratum; confirm.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5821
5822 /* All ethtool functions called with rtnl_lock */
5823
5824 static int
5825 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5826 {
5827         struct bnx2 *bp = netdev_priv(dev);
5828         int support_serdes = 0, support_copper = 0;
5829
5830         cmd->supported = SUPPORTED_Autoneg;
5831         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5832                 support_serdes = 1;
5833                 support_copper = 1;
5834         } else if (bp->phy_port == PORT_FIBRE)
5835                 support_serdes = 1;
5836         else
5837                 support_copper = 1;
5838
5839         if (support_serdes) {
5840                 cmd->supported |= SUPPORTED_1000baseT_Full |
5841                         SUPPORTED_FIBRE;
5842                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5843                         cmd->supported |= SUPPORTED_2500baseX_Full;
5844
5845         }
5846         if (support_copper) {
5847                 cmd->supported |= SUPPORTED_10baseT_Half |
5848                         SUPPORTED_10baseT_Full |
5849                         SUPPORTED_100baseT_Half |
5850                         SUPPORTED_100baseT_Full |
5851                         SUPPORTED_1000baseT_Full |
5852                         SUPPORTED_TP;
5853
5854         }
5855
5856         spin_lock_bh(&bp->phy_lock);
5857         cmd->port = bp->phy_port;
5858         cmd->advertising = bp->advertising;
5859
5860         if (bp->autoneg & AUTONEG_SPEED) {
5861                 cmd->autoneg = AUTONEG_ENABLE;
5862         }
5863         else {
5864                 cmd->autoneg = AUTONEG_DISABLE;
5865         }
5866
5867         if (netif_carrier_ok(dev)) {
5868                 cmd->speed = bp->line_speed;
5869                 cmd->duplex = bp->duplex;
5870         }
5871         else {
5872                 cmd->speed = -1;
5873                 cmd->duplex = -1;
5874         }
5875         spin_unlock_bh(&bp->phy_lock);
5876
5877         cmd->transceiver = XCVR_INTERNAL;
5878         cmd->phy_address = bp->phy_addr;
5879
5880         return 0;
5881 }
5882
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* ethtool set_settings: validate the requested port/autoneg/speed/
	 * duplex combination and reprogram the PHY.  Called with rtnl_lock
	 * held; the whole validation runs under phy_lock so the PHY state
	 * cannot change underneath it.  Returns 0 on success or -EINVAL for
	 * any unsupported combination.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	/* Snapshot current settings; they are only written back to bp
	 * after all validation has passed. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching media type is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable PHY and fibre media. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			/* 1000 half-duplex is not supported by the hardware. */
			goto err_out_unlock;
		else {
			/* Anything else: advertise all speeds for the media. */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex (autoneg disabled). */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only supports 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		/* Gigabit speeds cannot be forced on copper. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed; commit the new settings and apply them. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5965
5966 static void
5967 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5968 {
5969         struct bnx2 *bp = netdev_priv(dev);
5970
5971         strcpy(info->driver, DRV_MODULE_NAME);
5972         strcpy(info->version, DRV_MODULE_VERSION);
5973         strcpy(info->bus_info, pci_name(bp->pdev));
5974         strcpy(info->fw_version, bp->fw_version);
5975 }
5976
5977 #define BNX2_REGDUMP_LEN                (32 * 1024)
5978
/* ethtool get_regs_len: size in bytes of the register dump produced by
 * bnx2_get_regs().  Always the full fixed-size window.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5984
5985 static void
5986 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5987 {
5988         u32 *p = _p, i, offset;
5989         u8 *orig_p = _p;
5990         struct bnx2 *bp = netdev_priv(dev);
5991         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5992                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5993                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5994                                  0x1040, 0x1048, 0x1080, 0x10a4,
5995                                  0x1400, 0x1490, 0x1498, 0x14f0,
5996                                  0x1500, 0x155c, 0x1580, 0x15dc,
5997                                  0x1600, 0x1658, 0x1680, 0x16d8,
5998                                  0x1800, 0x1820, 0x1840, 0x1854,
5999                                  0x1880, 0x1894, 0x1900, 0x1984,
6000                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6001                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6002                                  0x2000, 0x2030, 0x23c0, 0x2400,
6003                                  0x2800, 0x2820, 0x2830, 0x2850,
6004                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6005                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6006                                  0x4080, 0x4090, 0x43c0, 0x4458,
6007                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6008                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6009                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6010                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6011                                  0x6800, 0x6848, 0x684c, 0x6860,
6012                                  0x6888, 0x6910, 0x8000 };
6013
6014         regs->version = 0;
6015
6016         memset(p, 0, BNX2_REGDUMP_LEN);
6017
6018         if (!netif_running(bp->dev))
6019                 return;
6020
6021         i = 0;
6022         offset = reg_boundaries[0];
6023         p += offset;
6024         while (offset < BNX2_REGDUMP_LEN) {
6025                 *p++ = REG_RD(bp, offset);
6026                 offset += 4;
6027                 if (offset == reg_boundaries[i + 1]) {
6028                         offset = reg_boundaries[i + 2];
6029                         p = (u32 *) (orig_p + offset);
6030                         i += 2;