/* [BNX2]: Re-structure the 2.5G Serdes code.
 * (source: linux-2.6.git, drivers/net/bnx2.c)
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define DRV_MODULE_NAME         "bnx2"
56 #define PFX DRV_MODULE_NAME     ": "
57 #define DRV_MODULE_VERSION      "1.5.8"
58 #define DRV_MODULE_RELDATE      "April 24, 2007"
59
60 #define RUN_AT(x) (jiffies + (x))
61
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT  (5*HZ)
64
65 static const char version[] __devinitdata =
66         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
73 static int disable_msi = 0;
74
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78 typedef enum {
79         BCM5706 = 0,
80         NC370T,
81         NC370I,
82         BCM5706S,
83         NC370F,
84         BCM5708,
85         BCM5708S,
86         BCM5709,
87 } board_t;
88
89 /* indexed by board_t, above */
90 static const struct {
91         char *name;
92 } board_info[] __devinitdata = {
93         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
94         { "HP NC370T Multifunction Gigabit Server Adapter" },
95         { "HP NC370i Multifunction Gigabit Server Adapter" },
96         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
97         { "HP NC370F Multifunction Gigabit Server Adapter" },
98         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
99         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
100         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
101         };
102
103 static struct pci_device_id bnx2_pci_tbl[] = {
104         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
119           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
120         { 0, }
121 };
122
123 static struct flash_spec flash_table[] =
124 {
125         /* Slow EEPROM */
126         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
127          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
128          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
129          "EEPROM - slow"},
130         /* Expansion entry 0001 */
131         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
132          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
133          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
134          "Entry 0001"},
135         /* Saifun SA25F010 (non-buffered flash) */
136         /* strap, cfg1, & write1 need updates */
137         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
138          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
139          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
140          "Non-buffered flash (128kB)"},
141         /* Saifun SA25F020 (non-buffered flash) */
142         /* strap, cfg1, & write1 need updates */
143         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
144          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
145          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
146          "Non-buffered flash (256kB)"},
147         /* Expansion entry 0100 */
148         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
149          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
150          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
151          "Entry 0100"},
152         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
153         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
154          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
155          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
156          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
157         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
158         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
159          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
160          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
161          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
162         /* Saifun SA25F005 (non-buffered flash) */
163         /* strap, cfg1, & write1 need updates */
164         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
165          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
166          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
167          "Non-buffered flash (64kB)"},
168         /* Fast EEPROM */
169         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
170          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
171          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
172          "EEPROM - fast"},
173         /* Expansion entry 1001 */
174         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
175          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
176          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177          "Entry 1001"},
178         /* Expansion entry 1010 */
179         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
180          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182          "Entry 1010"},
183         /* ATMEL AT45DB011B (buffered flash) */
184         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
185          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
186          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
187          "Buffered flash (128kB)"},
188         /* Expansion entry 1100 */
189         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
190          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
192          "Entry 1100"},
193         /* Expansion entry 1101 */
194         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
195          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197          "Entry 1101"},
198         /* Ateml Expansion entry 1110 */
199         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
200          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
201          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
202          "Entry 1110 (Atmel)"},
203         /* ATMEL AT45DB021B (buffered flash) */
204         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
205          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
206          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
207          "Buffered flash (256kB)"},
208 };
209
210 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211
212 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
213 {
214         u32 diff;
215
216         smp_mb();
217
218         /* The ring uses 256 indices for 255 entries, one of them
219          * needs to be skipped.
220          */
221         diff = bp->tx_prod - bp->tx_cons;
222         if (unlikely(diff >= TX_DESC_CNT)) {
223                 diff &= 0xffff;
224                 if (diff == TX_DESC_CNT)
225                         diff = MAX_TX_DESC_CNT;
226         }
227         return (bp->tx_ring_size - diff);
228 }
229
230 static u32
231 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
232 {
233         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
234         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
235 }
236
237 static void
238 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
239 {
240         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
242 }
243
244 static void
245 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
246 {
247         offset += cid_addr;
248         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
249                 int i;
250
251                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254                 for (i = 0; i < 5; i++) {
255                         u32 val;
256                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258                                 break;
259                         udelay(5);
260                 }
261         } else {
262                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263                 REG_WR(bp, BNX2_CTX_DATA, val);
264         }
265 }
266
267 static int
268 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
269 {
270         u32 val1;
271         int i, ret;
272
273         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
274                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
275                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
276
277                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
278                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
279
280                 udelay(40);
281         }
282
283         val1 = (bp->phy_addr << 21) | (reg << 16) |
284                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
285                 BNX2_EMAC_MDIO_COMM_START_BUSY;
286         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
287
288         for (i = 0; i < 50; i++) {
289                 udelay(10);
290
291                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
292                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
293                         udelay(5);
294
295                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
296                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
297
298                         break;
299                 }
300         }
301
302         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
303                 *val = 0x0;
304                 ret = -EBUSY;
305         }
306         else {
307                 *val = val1;
308                 ret = 0;
309         }
310
311         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
312                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
314
315                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317
318                 udelay(40);
319         }
320
321         return ret;
322 }
323
324 static int
325 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
326 {
327         u32 val1;
328         int i, ret;
329
330         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
331                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
332                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
333
334                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
335                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
336
337                 udelay(40);
338         }
339
340         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
341                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
342                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
343         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
344
345         for (i = 0; i < 50; i++) {
346                 udelay(10);
347
348                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
349                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
350                         udelay(5);
351                         break;
352                 }
353         }
354
355         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
356                 ret = -EBUSY;
357         else
358                 ret = 0;
359
360         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
361                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
363
364                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
365                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366
367                 udelay(40);
368         }
369
370         return ret;
371 }
372
/* Mask all device interrupts.  The read back flushes the posted PCI
 * write so the mask takes effect before the caller proceeds.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
380
/* Unmask device interrupts.  The first write acknowledges events up to
 * last_status_idx while interrupts are still masked; the second write
 * drops the mask.  The final HC_COMMAND write with COAL_NOW requests an
 * immediate coalescing pass (so an event that arrived while masked is
 * not left pending until the next timer tick — NOTE(review): inferred
 * from the COAL_NOW name; confirm against the 5706 programming guide).
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
393
/* Mask interrupts and wait for any in-flight ISR to finish on other
 * CPUs.  intr_sem is incremented first so bnx2_netif_start() will not
 * re-enable interrupts until the matching decrement.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
401
402 static void
403 bnx2_netif_stop(struct bnx2 *bp)
404 {
405         bnx2_disable_int_sync(bp);
406         if (netif_running(bp->dev)) {
407                 netif_poll_disable(bp->dev);
408                 netif_tx_disable(bp->dev);
409                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
410         }
411 }
412
413 static void
414 bnx2_netif_start(struct bnx2 *bp)
415 {
416         if (atomic_dec_and_test(&bp->intr_sem)) {
417                 if (netif_running(bp->dev)) {
418                         netif_wake_queue(bp->dev);
419                         netif_poll_enable(bp->dev);
420                         bnx2_enable_int(bp);
421                 }
422         }
423 }
424
/* Release all DMA-coherent rings and host-side shadow rings allocated
 * by bnx2_alloc_mem().  Safe to call on a partially-allocated state
 * (every pointer is checked and NULLed), so it doubles as the error
 * unwind path for bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709 context pages; ctx_pages is only set on that chip, so
	 * the loop is a no-op elsewhere (presumably zeroed at init —
	 * TODO confirm bp is kzalloc'd by the probe path).
	 */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and statistics blocks share one allocation; freeing
	 * status_blk frees both, so stats_blk is just cleared.
	 */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	/* kfree(NULL) is a no-op; no guard needed. */
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	/* rx_buf_ring was vmalloc'd (see bnx2_alloc_mem), hence vfree. */
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
463
/* Allocate all rings and blocks the device needs:
 *  - tx shadow ring (kzalloc) and tx descriptor ring (DMA-coherent)
 *  - rx shadow ring (vmalloc; can be large) and per-page rx
 *    descriptor rings (DMA-coherent)
 *  - a combined status + statistics block (one DMA allocation)
 *  - on the 5709 only, 0x2000 bytes of context memory split into
 *    BCM_PAGE_SIZE pages
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* vmalloc, not kmalloc: rx_max_ring pages of sw_bd may exceed
	 * what a physically-contiguous allocation can reliably provide.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives at the cache-aligned offset past the
	 * status block, in both CPU and bus address spaces.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
534
535 static void
536 bnx2_report_fw_link(struct bnx2 *bp)
537 {
538         u32 fw_link_status = 0;
539
540         if (bp->link_up) {
541                 u32 bmsr;
542
543                 switch (bp->line_speed) {
544                 case SPEED_10:
545                         if (bp->duplex == DUPLEX_HALF)
546                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
547                         else
548                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
549                         break;
550                 case SPEED_100:
551                         if (bp->duplex == DUPLEX_HALF)
552                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
553                         else
554                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
555                         break;
556                 case SPEED_1000:
557                         if (bp->duplex == DUPLEX_HALF)
558                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
559                         else
560                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
561                         break;
562                 case SPEED_2500:
563                         if (bp->duplex == DUPLEX_HALF)
564                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
565                         else
566                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
567                         break;
568                 }
569
570                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
571
572                 if (bp->autoneg) {
573                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
574
575                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
576                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
577
578                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
579                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
580                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
581                         else
582                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
583                 }
584         }
585         else
586                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
587
588         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
589 }
590
/* Log the link state to the console, update the carrier state, and
 * forward the new state to the bootcode.  The printk fragments below
 * assemble one line, e.g.:
 *   "ethX NIC Link is Up, 1000 Mbps full duplex, receive & transmit
 *    flow control ON"
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Flow control clause: RX-only, TX-only, or both. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	/* Keep the bootcode's view of the link in sync. */
	bnx2_report_fw_link(bp);
}
625
/* Determine the pause (flow control) configuration for the current
 * link and store it in bp->flow_ctrl (FLOW_CTRL_TX/FLOW_CTRL_RX bits).
 *
 * If flow control was not autonegotiated, the requested setting is
 * applied directly (full duplex only).  Otherwise the advertised and
 * link-partner pause bits are resolved per IEEE 802.3 Annex 28B,
 * except on the 5708 serdes where the hardware reports the already-
 * resolved result in the 1000X_STAT1 register.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Not autonegotiating both speed and flow control: use the
	 * administratively requested setting.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 serdes: read the hardware-resolved pause result. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Serdes uses the 1000X pause bit layout; translate it into the
	 * copper ADVERTISE_PAUSE_* layout so one resolution table below
	 * serves both media.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
701
702 static int
703 bnx2_5708s_linkup(struct bnx2 *bp)
704 {
705         u32 val;
706
707         bp->link_up = 1;
708         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
709         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
710                 case BCM5708S_1000X_STAT1_SPEED_10:
711                         bp->line_speed = SPEED_10;
712                         break;
713                 case BCM5708S_1000X_STAT1_SPEED_100:
714                         bp->line_speed = SPEED_100;
715                         break;
716                 case BCM5708S_1000X_STAT1_SPEED_1G:
717                         bp->line_speed = SPEED_1000;
718                         break;
719                 case BCM5708S_1000X_STAT1_SPEED_2G5:
720                         bp->line_speed = SPEED_2500;
721                         break;
722         }
723         if (val & BCM5708S_1000X_STAT1_FD)
724                 bp->duplex = DUPLEX_FULL;
725         else
726                 bp->duplex = DUPLEX_HALF;
727
728         return 0;
729 }
730
731 static int
732 bnx2_5706s_linkup(struct bnx2 *bp)
733 {
734         u32 bmcr, local_adv, remote_adv, common;
735
736         bp->link_up = 1;
737         bp->line_speed = SPEED_1000;
738
739         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
740         if (bmcr & BMCR_FULLDPLX) {
741                 bp->duplex = DUPLEX_FULL;
742         }
743         else {
744                 bp->duplex = DUPLEX_HALF;
745         }
746
747         if (!(bmcr & BMCR_ANENABLE)) {
748                 return 0;
749         }
750
751         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
752         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
753
754         common = local_adv & remote_adv;
755         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
756
757                 if (common & ADVERTISE_1000XFULL) {
758                         bp->duplex = DUPLEX_FULL;
759                 }
760                 else {
761                         bp->duplex = DUPLEX_HALF;
762                 }
763         }
764
765         return 0;
766 }
767
768 static int
769 bnx2_copper_linkup(struct bnx2 *bp)
770 {
771         u32 bmcr;
772
773         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
774         if (bmcr & BMCR_ANENABLE) {
775                 u32 local_adv, remote_adv, common;
776
777                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
778                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
779
780                 common = local_adv & (remote_adv >> 2);
781                 if (common & ADVERTISE_1000FULL) {
782                         bp->line_speed = SPEED_1000;
783                         bp->duplex = DUPLEX_FULL;
784                 }
785                 else if (common & ADVERTISE_1000HALF) {
786                         bp->line_speed = SPEED_1000;
787                         bp->duplex = DUPLEX_HALF;
788                 }
789                 else {
790                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
791                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
792
793                         common = local_adv & remote_adv;
794                         if (common & ADVERTISE_100FULL) {
795                                 bp->line_speed = SPEED_100;
796                                 bp->duplex = DUPLEX_FULL;
797                         }
798                         else if (common & ADVERTISE_100HALF) {
799                                 bp->line_speed = SPEED_100;
800                                 bp->duplex = DUPLEX_HALF;
801                         }
802                         else if (common & ADVERTISE_10FULL) {
803                                 bp->line_speed = SPEED_10;
804                                 bp->duplex = DUPLEX_FULL;
805                         }
806                         else if (common & ADVERTISE_10HALF) {
807                                 bp->line_speed = SPEED_10;
808                                 bp->duplex = DUPLEX_HALF;
809                         }
810                         else {
811                                 bp->line_speed = 0;
812                                 bp->link_up = 0;
813                         }
814                 }
815         }
816         else {
817                 if (bmcr & BMCR_SPEED100) {
818                         bp->line_speed = SPEED_100;
819                 }
820                 else {
821                         bp->line_speed = SPEED_10;
822                 }
823                 if (bmcr & BMCR_FULLDPLX) {
824                         bp->duplex = DUPLEX_FULL;
825                 }
826                 else {
827                         bp->duplex = DUPLEX_HALF;
828                 }
829         }
830
831         return 0;
832 }
833
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Program the EMAC to match the resolved link state
	 * (bp->link_up / bp->line_speed / bp->duplex / bp->flow_ctrl).
	 * Default IPG/slot-time value; 1000 Mbps half duplex uses a
	 * larger value (0x26ff) for the bigger collision window.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	/* Clear all port/duplex/loopback/2.5G bits before re-resolving. */
	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no separate 10M MII mode;
				 * it falls through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G uses GMII port mode plus the 25G bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
900
901 static int
902 bnx2_test_and_enable_2g5(struct bnx2 *bp)
903 {
904         u32 up1;
905         int ret = 1;
906
907         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
908                 return 0;
909
910         if (bp->autoneg & AUTONEG_SPEED)
911                 bp->advertising |= ADVERTISED_2500baseX_Full;
912
913         bnx2_read_phy(bp, bp->mii_up1, &up1);
914         if (!(up1 & BCM5708S_UP1_2G5)) {
915                 up1 |= BCM5708S_UP1_2G5;
916                 bnx2_write_phy(bp, bp->mii_up1, up1);
917                 ret = 0;
918         }
919
920         return ret;
921 }
922
923 static int
924 bnx2_test_and_disable_2g5(struct bnx2 *bp)
925 {
926         u32 up1;
927         int ret = 0;
928
929         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
930                 return 0;
931
932         bnx2_read_phy(bp, bp->mii_up1, &up1);
933         if (up1 & BCM5708S_UP1_2G5) {
934                 up1 &= ~BCM5708S_UP1_2G5;
935                 bnx2_write_phy(bp, bp->mii_up1, up1);
936                 ret = 1;
937         }
938
939         return ret;
940 }
941
942 static void
943 bnx2_enable_forced_2g5(struct bnx2 *bp)
944 {
945         u32 bmcr;
946
947         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
948                 return;
949
950         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
951                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
952                 bmcr |= BCM5708S_BMCR_FORCE_2500;
953         }
954
955         if (bp->autoneg & AUTONEG_SPEED) {
956                 bmcr &= ~BMCR_ANENABLE;
957                 if (bp->req_duplex == DUPLEX_FULL)
958                         bmcr |= BMCR_FULLDPLX;
959         }
960         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
961 }
962
963 static void
964 bnx2_disable_forced_2g5(struct bnx2 *bp)
965 {
966         u32 bmcr;
967
968         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
969                 return;
970
971         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
972                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
973                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
974         }
975
976         if (bp->autoneg & AUTONEG_SPEED)
977                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
978         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
979 }
980
/* Re-resolve the link state from the PHY and reprogram the MAC to
 * match.  Reports the link only on an actual up/down transition.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is forced up; nothing to resolve. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remember previous state so we only report real transitions. */
	link_up = bp->link_up;

	/* BMSR link status is a latching bit; read twice so the second
	 * read reflects the current state.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

	/* On 5706 SerDes, override BMSR_LSTATUS with the EMAC link
	 * status bit.  NOTE(review): presumably the 5706S PHY link bit
	 * is unreliable -- confirm against chip errata.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex via the chip-specific handler. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: drop any forced 2.5G setting so autoneg
		 * restarts from a clean state, and clear parallel-detect.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	/* Always resync the MAC with the (possibly unchanged) state. */
	bnx2_set_mac_link(bp);

	return 0;
}
1039
1040 static int
1041 bnx2_reset_phy(struct bnx2 *bp)
1042 {
1043         int i;
1044         u32 reg;
1045
1046         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1047
1048 #define PHY_RESET_MAX_WAIT 100
1049         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1050                 udelay(10);
1051
1052                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1053                 if (!(reg & BMCR_RESET)) {
1054                         udelay(20);
1055                         break;
1056                 }
1057         }
1058         if (i == PHY_RESET_MAX_WAIT) {
1059                 return -EBUSY;
1060         }
1061         return 0;
1062 }
1063
1064 static u32
1065 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1066 {
1067         u32 adv = 0;
1068
1069         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1070                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1071
1072                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1073                         adv = ADVERTISE_1000XPAUSE;
1074                 }
1075                 else {
1076                         adv = ADVERTISE_PAUSE_CAP;
1077                 }
1078         }
1079         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1080                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1081                         adv = ADVERTISE_1000XPSE_ASYM;
1082                 }
1083                 else {
1084                         adv = ADVERTISE_PAUSE_ASYM;
1085                 }
1086         }
1087         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1088                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1089                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1090                 }
1091                 else {
1092                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1093                 }
1094         }
1095         return adv;
1096 }
1097
/* Configure the SerDes PHY for either a forced speed/duplex or
 * autonegotiation, per bp->autoneg / bp->req_line_speed /
 * bp->req_duplex.  May briefly drop bp->phy_lock around an msleep()
 * in the autoneg path.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G enable bit requires a link bounce
		 * for the change to take effect.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* 5708 has a vendor BMCR bit to force 2.5G. */
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Strip all speed advertisements and
				 * restart autoneg so the partner sees
				 * the link drop before we force speed.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just resync flow control/MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop phy_lock around the sleep; caller holds it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		/* Advertisement already correct and autoneg enabled. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1201
1202 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1203         (ADVERTISED_1000baseT_Full)
1204
1205 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1206         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1207         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1208         ADVERTISED_1000baseT_Full)
1209
1210 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1211         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1212
1213 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1214
/* Configure the copper PHY for autonegotiation or a forced
 * speed/duplex.  May briefly drop bp->phy_lock around an msleep()
 * when bouncing the link in the forced-speed path.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Build the desired 10/100 and 1000 advertisement
		 * registers from bp->advertising.
		 */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		/* Keep only speed and pause bits for the comparison. */
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite and restart autoneg only if something
		 * actually changed or autoneg was off.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced-speed path: build the target BMCR from the request. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Double-read: BMSR link status is a latching bit. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1308
1309 static int
1310 bnx2_setup_phy(struct bnx2 *bp)
1311 {
1312         if (bp->loopback == MAC_LOOPBACK)
1313                 return 0;
1314
1315         if (bp->phy_flags & PHY_SERDES_FLAG) {
1316                 return (bnx2_setup_serdes_phy(bp));
1317         }
1318         else {
1319                 return (bnx2_setup_copper_phy(bp));
1320         }
1321 }
1322
/* One-time initialization of the 5708 SerDes PHY: IEEE register
 * layout, fiber autodetect, optional 2.5G enable, and board-specific
 * TX amplitude tuning pulled from shared firmware configuration.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Select the DIG3 register bank and switch the PHY to the
	 * standard IEEE register layout, then return to the DIG bank.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Enable fiber mode with speed autodetect. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply board-specific TX control from shared memory, if the
	 * firmware configured one and this is a backplane design.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1376
/* One-time initialization of the 5706 SerDes PHY.  The raw 0x18/0x1c
 * accesses are Broadcom vendor (shadow) registers; the exact bit
 * meanings are not documented here -- values taken from vendor code.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bits for normal MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1411
/* One-time initialization of the copper PHY: optional CRC and
 * early-DAC workarounds, jumbo-frame (extended packet length) setup,
 * and ethernet@wirespeed.  The raw 0x10/0x15/0x17/0x18 accesses are
 * Broadcom vendor (shadow/expansion) registers; values taken from
 * vendor code.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* DSP write sequence working around a CRC errata on affected
	 * boards (PHY_CRC_FIX_FLAG set elsewhere from chip/board info).
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (bit 8 of DSP expand register 8). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for normal MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1460
1461
/* Top-level PHY bring-up: set default MII register offsets, enable
 * link attention, reset the PHY, read its ID, run the chip-specific
 * init, then apply the requested configuration via bnx2_setup_phy().
 * Returns the chip-specific init's status.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Standard clause-22 register offsets; chip-specific code may
	 * remap these for non-standard PHYs.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* NOTE(review): the reset's -EBUSY return is ignored here;
	 * init proceeds regardless.
	 */
	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1499
1500 static int
1501 bnx2_set_mac_loopback(struct bnx2 *bp)
1502 {
1503         u32 mac_mode;
1504
1505         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1506         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1507         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1508         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1509         bp->link_up = 1;
1510         return 0;
1511 }
1512
1513 static int bnx2_test_link(struct bnx2 *);
1514
/* Put the PHY into loopback at 1000 Mbps full duplex, wait up to
 * ~1 second for the link to settle, then configure the EMAC for GMII
 * with loopback/force bits cleared.  Returns the PHY write status.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; proceed anyway after 10 tries. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1544
/* Send a command to the bootcode via the driver mailbox and wait for
 * an acknowledgement.  A sequence number is embedded in msg_data and
 * echoed back by the firmware in its ACK field.  Returns 0 on ack (or
 * for WAIT0 messages that don't require one), -EBUSY on timeout,
 * -EIO if the firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware echoes our sequence number in its ACK field. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't need to be acked to succeed. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1587
/* Program the 5709's host-based context page table: one entry per
 * context page previously DMA-mapped into bp->ctx_blk_mapping[],
 * polling each write request until the hardware consumes it.
 * Returns 0 on success, -EBUSY if a write request never completes.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and encode the page size
	 * (BCM_PAGE_BITS - 8) in bits 16+.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the DMA address plus the valid bit,
		 * then the high 32 bits, then trigger the table write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll (up to ~50 us) for the WRITE_REQ bit to clear. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1621
/* Zero out the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips keep context on-chip rather than in host memory).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* 5706 A0 has a different virtual-to-physical
			 * CID mapping: remap CIDs with bit 3 set.
			 */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the context page at virtual address 0 ... */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		/* ... then restore the real virtual address mapping. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1662
1663 static int
1664 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1665 {
1666         u16 *good_mbuf;
1667         u32 good_mbuf_cnt;
1668         u32 val;
1669
1670         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1671         if (good_mbuf == NULL) {
1672                 printk(KERN_ERR PFX "Failed to allocate memory in "
1673                                     "bnx2_alloc_bad_rbuf\n");
1674                 return -ENOMEM;
1675         }
1676
1677         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1678                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1679
1680         good_mbuf_cnt = 0;
1681
1682         /* Allocate a bunch of mbufs and save the good ones in an array. */
1683         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1684         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1685                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1686
1687                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1688
1689                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1690
1691                 /* The addresses with Bit 9 set are bad memory blocks. */
1692                 if (!(val & (1 << 9))) {
1693                         good_mbuf[good_mbuf_cnt] = (u16) val;
1694                         good_mbuf_cnt++;
1695                 }
1696
1697                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1698         }
1699
1700         /* Free the good ones back to the mbuf pool thus discarding
1701          * all the bad ones. */
1702         while (good_mbuf_cnt) {
1703                 good_mbuf_cnt--;
1704
1705                 val = good_mbuf[good_mbuf_cnt];
1706                 val = (val << 9) | val | 1;
1707
1708                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1709         }
1710         kfree(good_mbuf);
1711         return 0;
1712 }
1713
1714 static void
1715 bnx2_set_mac_addr(struct bnx2 *bp)
1716 {
1717         u32 val;
1718         u8 *mac_addr = bp->dev->dev_addr;
1719
1720         val = (mac_addr[0] << 8) | mac_addr[1];
1721
1722         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1723
1724         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1725                 (mac_addr[4] << 8) | mac_addr[5];
1726
1727         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1728 }
1729
1730 static inline int
1731 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1732 {
1733         struct sk_buff *skb;
1734         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1735         dma_addr_t mapping;
1736         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1737         unsigned long align;
1738
1739         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1740         if (skb == NULL) {
1741                 return -ENOMEM;
1742         }
1743
1744         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1745                 skb_reserve(skb, BNX2_RX_ALIGN - align);
1746
1747         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1748                 PCI_DMA_FROMDEVICE);
1749
1750         rx_buf->skb = skb;
1751         pci_unmap_addr_set(rx_buf, mapping, mapping);
1752
1753         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1754         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1755
1756         bp->rx_prod_bseq += bp->rx_buf_use_size;
1757
1758         return 0;
1759 }
1760
1761 static void
1762 bnx2_phy_int(struct bnx2 *bp)
1763 {
1764         u32 new_link_state, old_link_state;
1765
1766         new_link_state = bp->status_blk->status_attn_bits &
1767                 STATUS_ATTN_BITS_LINK_STATE;
1768         old_link_state = bp->status_blk->status_attn_bits_ack &
1769                 STATUS_ATTN_BITS_LINK_STATE;
1770         if (new_link_state != old_link_state) {
1771                 if (new_link_state) {
1772                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1773                                 STATUS_ATTN_BITS_LINK_STATE);
1774                 }
1775                 else {
1776                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1777                                 STATUS_ATTN_BITS_LINK_STATE);
1778                 }
1779                 bnx2_set_link(bp);
1780         }
1781 }
1782
/* Reclaim transmit BDs completed by the hardware.
 *
 * Walks the tx ring from the driver's consumer index up to the
 * hardware consumer index reported in the status block, unmapping and
 * freeing each completed skb, then wakes the tx queue if it was
 * stopped and enough ring space has been freed.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* An index pointing at the last BD of a ring page is stepped
	 * over (that slot is not a normal descriptor).
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Compute the index of the packet's final BD
			 * (header BD plus one BD per fragment).
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed-wraparound compare: if the final BD has
			 * not completed yet, stop and wait for the next
			 * completion event.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap the fragment pages that follow the header BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more packets may have
		 * completed while this one was being cleaned up.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop in the transmit path.
		 */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1870
1871 static inline void
1872 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1873         u16 cons, u16 prod)
1874 {
1875         struct sw_bd *cons_rx_buf, *prod_rx_buf;
1876         struct rx_bd *cons_bd, *prod_bd;
1877
1878         cons_rx_buf = &bp->rx_buf_ring[cons];
1879         prod_rx_buf = &bp->rx_buf_ring[prod];
1880
1881         pci_dma_sync_single_for_device(bp->pdev,
1882                 pci_unmap_addr(cons_rx_buf, mapping),
1883                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1884
1885         bp->rx_prod_bseq += bp->rx_buf_use_size;
1886
1887         prod_rx_buf->skb = skb;
1888
1889         if (cons == prod)
1890                 return;
1891
1892         pci_unmap_addr_set(prod_rx_buf, mapping,
1893                         pci_unmap_addr(cons_rx_buf, mapping));
1894
1895         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1896         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1897         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1898         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1899 }
1900
/* Receive up to @budget packets from the rx ring.
 *
 * For each completed BD: packets with hardware-reported errors are
 * recycled; small packets (when mtu > 1500) are copied into a fresh
 * skb so the original buffer can be recycled; otherwise a replacement
 * buffer is allocated and the filled one is passed up the stack.
 * Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip over the last BD of a ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the frame-header area is synced here; the full
		 * buffer is unmapped later if the skb is passed up.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr to the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Strip the 4-byte CRC from the reported length. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes back on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer is in place; hand the
			 * filled one up after skipping the frame header.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error or allocation failure: recycle the
			 * buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-length frames unless they carry a VLAN
		 * ethertype (0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only for TCP/UDP frames
		 * with no checksum error flags set.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2048
2049 /* MSI ISR - The only difference between this and the INTx ISR
2050  * is that the MSI interrupt is always serviced.
2051  */
2052 static irqreturn_t
2053 bnx2_msi(int irq, void *dev_instance)
2054 {
2055         struct net_device *dev = dev_instance;
2056         struct bnx2 *bp = netdev_priv(dev);
2057
2058         prefetch(bp->status_blk);
2059         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2060                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2061                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2062
2063         /* Return here if interrupt is disabled. */
2064         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2065                 return IRQ_HANDLED;
2066
2067         netif_rx_schedule(dev);
2068
2069         return IRQ_HANDLED;
2070 }
2071
2072 static irqreturn_t
2073 bnx2_interrupt(int irq, void *dev_instance)
2074 {
2075         struct net_device *dev = dev_instance;
2076         struct bnx2 *bp = netdev_priv(dev);
2077
2078         /* When using INTx, it is possible for the interrupt to arrive
2079          * at the CPU before the status block posted prior to the
2080          * interrupt. Reading a register will flush the status block.
2081          * When using MSI, the MSI message will always complete after
2082          * the status block write.
2083          */
2084         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2085             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2086              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2087                 return IRQ_NONE;
2088
2089         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2090                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2091                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2092
2093         /* Return here if interrupt is shared and is disabled. */
2094         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2095                 return IRQ_HANDLED;
2096
2097         netif_rx_schedule(dev);
2098
2099         return IRQ_HANDLED;
2100 }
2101
2102 static inline int
2103 bnx2_has_work(struct bnx2 *bp)
2104 {
2105         struct status_block *sblk = bp->status_blk;
2106
2107         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2108             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2109                 return 1;
2110
2111         if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2112             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
2113                 return 1;
2114
2115         return 0;
2116 }
2117
/* NAPI poll callback.
 *
 * Services link attentions, tx completions, and up to *budget rx
 * packets, then records the status index and re-enables interrupts
 * when no work remains.  Returns 1 while more work is pending, 0 when
 * polling is complete.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between the attention bits and their ack copies
	 * means a link event needs servicing.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Respect both the global budget and the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack write re-enables interrupts. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: two writes -- the first updates the index with
		 * interrupts still masked, the second unmasks.
		 * NOTE(review): presumably this avoids a spurious
		 * interrupt window on shared INTx lines -- confirm
		 * against the chip manual before changing.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2179
2180 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2181  * from set_multicast.
2182  */
/* Program the EMAC rx filters (promiscuous / all-multicast / multicast
 * hash / VLAN tag stripping) to match dev->flags and the device's
 * multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits managed here cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in the frame only when no vlan group is
	 * registered and ASF firmware does not need them stripped.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast by setting every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The low 8 CRC bits select one bit in the
			 * 8 x 32-bit hash register file.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, program the new mode, then re-enable it. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2254
2255 #define FW_BUF_SIZE     0x8000
2256
2257 static int
2258 bnx2_gunzip_init(struct bnx2 *bp)
2259 {
2260         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2261                 goto gunzip_nomem1;
2262
2263         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2264                 goto gunzip_nomem2;
2265
2266         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2267         if (bp->strm->workspace == NULL)
2268                 goto gunzip_nomem3;
2269
2270         return 0;
2271
2272 gunzip_nomem3:
2273         kfree(bp->strm);
2274         bp->strm = NULL;
2275
2276 gunzip_nomem2:
2277         vfree(bp->gunzip_buf);
2278         bp->gunzip_buf = NULL;
2279
2280 gunzip_nomem1:
2281         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2282                             "uncompression.\n", bp->dev->name);
2283         return -ENOMEM;
2284 }
2285
2286 static void
2287 bnx2_gunzip_end(struct bnx2 *bp)
2288 {
2289         kfree(bp->strm->workspace);
2290
2291         kfree(bp->strm);
2292         bp->strm = NULL;
2293
2294         if (bp->gunzip_buf) {
2295                 vfree(bp->gunzip_buf);
2296                 bp->gunzip_buf = NULL;
2297         }
2298 }
2299
2300 static int
2301 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2302 {
2303         int n, rc;
2304
2305         /* check gzip header */
2306         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2307                 return -EINVAL;
2308
2309         n = 10;
2310
2311 #define FNAME   0x8
2312         if (zbuf[3] & FNAME)
2313                 while ((zbuf[n++] != 0) && (n < len));
2314
2315         bp->strm->next_in = zbuf + n;
2316         bp->strm->avail_in = len - n;
2317         bp->strm->next_out = bp->gunzip_buf;
2318         bp->strm->avail_out = FW_BUF_SIZE;
2319
2320         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2321         if (rc != Z_OK)
2322                 return rc;
2323
2324         rc = zlib_inflate(bp->strm, Z_FINISH);
2325
2326         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2327         *outbuf = bp->gunzip_buf;
2328
2329         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2330                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2331                        bp->dev->name, bp->strm->msg);
2332
2333         zlib_inflateEnd(bp->strm);
2334
2335         if (rc == Z_STREAM_END)
2336                 return 0;
2337
2338         return rc;
2339 }
2340
2341 static void
2342 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2343         u32 rv2p_proc)
2344 {
2345         int i;
2346         u32 val;
2347
2348
2349         for (i = 0; i < rv2p_code_len; i += 8) {
2350                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2351                 rv2p_code++;
2352                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2353                 rv2p_code++;
2354
2355                 if (rv2p_proc == RV2P_PROC1) {
2356                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2357                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2358                 }
2359                 else {
2360                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2361                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2362                 }
2363         }
2364
2365         /* Reset the processor, un-stall is done later. */
2366         if (rv2p_proc == RV2P_PROC1) {
2367                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2368         }
2369         else {
2370                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2371         }
2372 }
2373
2374 static int
2375 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2376 {
2377         u32 offset;
2378         u32 val;
2379         int rc;
2380
2381         /* Halt the CPU. */
2382         val = REG_RD_IND(bp, cpu_reg->mode);
2383         val |= cpu_reg->mode_value_halt;
2384         REG_WR_IND(bp, cpu_reg->mode, val);
2385         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2386
2387         /* Load the Text area. */
2388         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2389         if (fw->gz_text) {
2390                 u32 text_len;
2391                 void *text;
2392
2393                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2394                                  &text_len);
2395                 if (rc)
2396                         return rc;
2397
2398                 fw->text = text;
2399         }
2400         if (fw->gz_text) {
2401                 int j;
2402
2403                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2404                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2405                 }
2406         }
2407
2408         /* Load the Data area. */
2409         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2410         if (fw->data) {
2411                 int j;
2412
2413                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2414                         REG_WR_IND(bp, offset, fw->data[j]);
2415                 }
2416         }
2417
2418         /* Load the SBSS area. */
2419         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2420         if (fw->sbss) {
2421                 int j;
2422
2423                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2424                         REG_WR_IND(bp, offset, fw->sbss[j]);
2425                 }
2426         }
2427
2428         /* Load the BSS area. */
2429         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2430         if (fw->bss) {
2431                 int j;
2432
2433                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2434                         REG_WR_IND(bp, offset, fw->bss[j]);
2435                 }
2436         }
2437
2438         /* Load the Read-Only area. */
2439         offset = cpu_reg->spad_base +
2440                 (fw->rodata_addr - cpu_reg->mips_view_base);
2441         if (fw->rodata) {
2442                 int j;
2443
2444                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2445                         REG_WR_IND(bp, offset, fw->rodata[j]);
2446                 }
2447         }
2448
2449         /* Clear the pre-fetch instruction. */
2450         REG_WR_IND(bp, cpu_reg->inst, 0);
2451         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2452
2453         /* Start the CPU. */
2454         val = REG_RD_IND(bp, cpu_reg->mode);
2455         val &= ~cpu_reg->mode_value_halt;
2456         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2457         REG_WR_IND(bp, cpu_reg->mode, val);
2458
2459         return 0;
2460 }
2461
2462 static int
2463 bnx2_init_cpus(struct bnx2 *bp)
2464 {
2465         struct cpu_reg cpu_reg;
2466         struct fw_info *fw;
2467         int rc = 0;
2468         void *text;
2469         u32 text_len;
2470
2471         if ((rc = bnx2_gunzip_init(bp)) != 0)
2472                 return rc;
2473
2474         /* Initialize the RV2P processor. */
2475         rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2476                          &text_len);
2477         if (rc)
2478                 goto init_cpu_err;
2479
2480         load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2481
2482         rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2483                          &text_len);
2484         if (rc)
2485                 goto init_cpu_err;
2486
2487         load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2488
2489         /* Initialize the RX Processor. */
2490         cpu_reg.mode = BNX2_RXP_CPU_MODE;
2491         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2492         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2493         cpu_reg.state = BNX2_RXP_CPU_STATE;
2494         cpu_reg.state_value_clear = 0xffffff;
2495         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2496         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2497         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2498         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2499         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2500         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2501         cpu_reg.mips_view_base = 0x8000000;
2502
2503         if (CHIP_NUM(bp) == CHIP_NUM_5709)
2504                 fw = &bnx2_rxp_fw_09;
2505         else
2506                 fw = &bnx2_rxp_fw_06;
2507
2508         rc = load_cpu_fw(bp, &cpu_reg, fw);
2509         if (rc)
2510                 goto init_cpu_err;
2511
2512         /* Initialize the TX Processor. */
2513         cpu_reg.mode = BNX2_TXP_CPU_MODE;
2514         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2515         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2516         cpu_reg.state = BNX2_TXP_CPU_STATE;
2517         cpu_reg.state_value_clear = 0xffffff;
2518         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2519         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2520         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2521         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2522         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2523         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2524         cpu_reg.mips_view_base = 0x8000000;
2525
2526         if (CHIP_NUM(bp) == CHIP_NUM_5709)
2527                 fw = &bnx2_txp_fw_09;
2528         else
2529                 fw = &bnx2_txp_fw_06;
2530
2531         rc = load_cpu_fw(bp, &cpu_reg, fw);
2532         if (rc)
2533                 goto init_cpu_err;
2534
2535         /* Initialize the TX Patch-up Processor. */
2536         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2537         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2538         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2539         cpu_reg.state = BNX2_TPAT_CPU_STATE;
2540         cpu_reg.state_value_clear = 0xffffff;
2541         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2542         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2543         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2544         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2545         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2546         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2547         cpu_reg.mips_view_base = 0x8000000;
2548
2549         if (CHIP_NUM(bp) == CHIP_NUM_5709)
2550                 fw = &bnx2_tpat_fw_09;
2551         else
2552                 fw = &bnx2_tpat_fw_06;
2553
2554         rc = load_cpu_fw(bp, &cpu_reg, fw);
2555         if (rc)
2556                 goto init_cpu_err;
2557
2558         /* Initialize the Completion Processor. */
2559         cpu_reg.mode = BNX2_COM_CPU_MODE;
2560         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2561         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2562         cpu_reg.state = BNX2_COM_CPU_STATE;
2563         cpu_reg.state_value_clear = 0xffffff;
2564         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2565         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2566         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2567         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2568         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2569         cpu_reg.spad_base = BNX2_COM_SCRATCH;
2570         cpu_reg.mips_view_base = 0x8000000;
2571
2572         if (CHIP_NUM(bp) == CHIP_NUM_5709)
2573                 fw = &bnx2_com_fw_09;
2574         else
2575                 fw = &bnx2_com_fw_06;
2576
2577         rc = load_cpu_fw(bp, &cpu_reg, fw);
2578         if (rc)
2579                 goto init_cpu_err;
2580
2581         /* Initialize the Command Processor. */
2582         cpu_reg.mode = BNX2_CP_CPU_MODE;
2583         cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2584         cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2585         cpu_reg.state = BNX2_CP_CPU_STATE;
2586         cpu_reg.state_value_clear = 0xffffff;
2587         cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2588         cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2589         cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2590         cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2591         cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2592         cpu_reg.spad_base = BNX2_CP_SCRATCH;
2593         cpu_reg.mips_view_base = 0x8000000;
2594
2595         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2596                 fw = &bnx2_cp_fw_09;
2597
2598                 rc = load_cpu_fw(bp, &cpu_reg, fw);
2599                 if (rc)
2600                         goto init_cpu_err;
2601         }
2602 init_cpu_err:
2603         bnx2_gunzip_end(bp);
2604         return rc;
2605 }
2606
/* Move the device between PCI power states D0 (running) and D3hot
 * (suspended).  For D3hot with WOL enabled, the MAC is reconfigured to
 * keep a low-speed link and receive magic/ACPI wake packets, the
 * firmware is told why we are suspending, and PMCSR is finally written
 * to drop into D3hot.  Any state other than D0/D3hot returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (back to D0) and ack any
		 * pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packets and stop matching
		 * magic packets now that we are awake. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg so the link can
			 * be kept up at low power for WOL, then restore the
			 * user's original settings. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort-user 0 for broadcast + multicast:
			 * clear it, write the value, then set the enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware why we are suspending, unless this
		 * board cannot do the WOL handshake at all. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* 3 == D3hot in the PMCSR state field.  On 5706 A0/A1 we
		 * only enter D3hot when WOL is armed; otherwise the chip
		 * stays in D0.  NOTE(review): presumably a chip erratum on
		 * those steppings — confirm. */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2733
2734 static int
2735 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2736 {
2737         u32 val;
2738         int j;
2739
2740         /* Request access to the flash interface. */
2741         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2742         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2743                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2744                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2745                         break;
2746
2747                 udelay(5);
2748         }
2749
2750         if (j >= NVRAM_TIMEOUT_COUNT)
2751                 return -EBUSY;
2752
2753         return 0;
2754 }
2755
2756 static int
2757 bnx2_release_nvram_lock(struct bnx2 *bp)
2758 {
2759         int j;
2760         u32 val;
2761
2762         /* Relinquish nvram interface. */
2763         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2764
2765         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2766                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2767                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2768                         break;
2769
2770                 udelay(5);
2771         }
2772
2773         if (j >= NVRAM_TIMEOUT_COUNT)
2774                 return -EBUSY;
2775
2776         return 0;
2777 }
2778
2779
2780 static int
2781 bnx2_enable_nvram_write(struct bnx2 *bp)
2782 {
2783         u32 val;
2784
2785         val = REG_RD(bp, BNX2_MISC_CFG);
2786         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2787
2788         if (!bp->flash_info->buffered) {
2789                 int j;
2790
2791                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2792                 REG_WR(bp, BNX2_NVM_COMMAND,
2793                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2794
2795                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2796                         udelay(5);
2797
2798                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2799                         if (val & BNX2_NVM_COMMAND_DONE)
2800                                 break;
2801                 }
2802
2803                 if (j >= NVRAM_TIMEOUT_COUNT)
2804                         return -EBUSY;
2805         }
2806         return 0;
2807 }
2808
2809 static void
2810 bnx2_disable_nvram_write(struct bnx2 *bp)
2811 {
2812         u32 val;
2813
2814         val = REG_RD(bp, BNX2_MISC_CFG);
2815         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2816 }
2817
2818
2819 static void
2820 bnx2_enable_nvram_access(struct bnx2 *bp)
2821 {
2822         u32 val;
2823
2824         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2825         /* Enable both bits, even on read. */
2826         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2827                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2828 }
2829
2830 static void
2831 bnx2_disable_nvram_access(struct bnx2 *bp)
2832 {
2833         u32 val;
2834
2835         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2836         /* Disable both bits, even after read. */
2837         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2838                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2839                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2840 }
2841
2842 static int
2843 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2844 {
2845         u32 cmd;
2846         int j;
2847
2848         if (bp->flash_info->buffered)
2849                 /* Buffered flash, no erase needed */
2850                 return 0;
2851
2852         /* Build an erase command */
2853         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2854               BNX2_NVM_COMMAND_DOIT;
2855
2856         /* Need to clear DONE bit separately. */
2857         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2858
2859         /* Address of the NVRAM to read from. */
2860         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2861
2862         /* Issue an erase command. */
2863         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2864
2865         /* Wait for completion. */
2866         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2867                 u32 val;
2868
2869                 udelay(5);
2870
2871                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2872                 if (val & BNX2_NVM_COMMAND_DONE)
2873                         break;
2874         }
2875
2876         if (j >= NVRAM_TIMEOUT_COUNT)
2877                 return -EBUSY;
2878
2879         return 0;
2880 }
2881
2882 static int
2883 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2884 {
2885         u32 cmd;
2886         int j;
2887
2888         /* Build the command word. */
2889         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2890
2891         /* Calculate an offset of a buffered flash. */
2892         if (bp->flash_info->buffered) {
2893                 offset = ((offset / bp->flash_info->page_size) <<
2894                            bp->flash_info->page_bits) +
2895                           (offset % bp->flash_info->page_size);
2896         }
2897
2898         /* Need to clear DONE bit separately. */
2899         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2900
2901         /* Address of the NVRAM to read from. */
2902         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2903
2904         /* Issue a read command. */
2905         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2906
2907         /* Wait for completion. */
2908         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2909                 u32 val;
2910
2911                 udelay(5);
2912
2913                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2914                 if (val & BNX2_NVM_COMMAND_DONE) {
2915                         val = REG_RD(bp, BNX2_NVM_READ);
2916
2917                         val = be32_to_cpu(val);
2918                         memcpy(ret_val, &val, 4);
2919                         break;
2920                 }
2921         }
2922         if (j >= NVRAM_TIMEOUT_COUNT)
2923                 return -EBUSY;
2924
2925         return 0;
2926 }
2927
2928
2929 static int
2930 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2931 {
2932         u32 cmd, val32;
2933         int j;
2934
2935         /* Build the command word. */
2936         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2937
2938         /* Calculate an offset of a buffered flash. */
2939         if (bp->flash_info->buffered) {
2940                 offset = ((offset / bp->flash_info->page_size) <<
2941                           bp->flash_info->page_bits) +
2942                          (offset % bp->flash_info->page_size);
2943         }
2944
2945         /* Need to clear DONE bit separately. */
2946         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2947
2948         memcpy(&val32, val, 4);
2949         val32 = cpu_to_be32(val32);
2950
2951         /* Write the data. */
2952         REG_WR(bp, BNX2_NVM_WRITE, val32);
2953
2954         /* Address of the NVRAM to write to. */
2955         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2956
2957         /* Issue the write command. */
2958         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2959
2960         /* Wait for completion. */
2961         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2962                 udelay(5);
2963
2964                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2965                         break;
2966         }
2967         if (j >= NVRAM_TIMEOUT_COUNT)
2968                 return -EBUSY;
2969
2970         return 0;
2971 }
2972
2973 static int
2974 bnx2_init_nvram(struct bnx2 *bp)
2975 {
2976         u32 val;
2977         int j, entry_count, rc;
2978         struct flash_spec *flash;
2979
2980         /* Determine the selected interface. */
2981         val = REG_RD(bp, BNX2_NVM_CFG1);
2982
2983         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2984
2985         rc = 0;
2986         if (val & 0x40000000) {
2987
2988                 /* Flash interface has been reconfigured */
2989                 for (j = 0, flash = &flash_table[0]; j < entry_count;
2990                      j++, flash++) {
2991                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
2992                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2993                                 bp->flash_info = flash;
2994                                 break;
2995                         }
2996                 }
2997         }
2998         else {
2999                 u32 mask;
3000                 /* Not yet been reconfigured */
3001
3002                 if (val & (1 << 23))
3003                         mask = FLASH_BACKUP_STRAP_MASK;
3004                 else
3005                         mask = FLASH_STRAP_MASK;
3006
3007                 for (j = 0, flash = &flash_table[0]; j < entry_count;
3008                         j++, flash++) {
3009
3010                         if ((val & mask) == (flash->strapping & mask)) {
3011                                 bp->flash_info = flash;
3012
3013                                 /* Request access to the flash interface. */
3014                                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3015                                         return rc;
3016
3017                                 /* Enable access to flash interface */
3018                                 bnx2_enable_nvram_access(bp);
3019
3020                                 /* Reconfigure the flash interface */
3021                                 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3022                                 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3023                                 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3024                                 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3025
3026                                 /* Disable access to flash interface */
3027                                 bnx2_disable_nvram_access(bp);
3028                                 bnx2_release_nvram_lock(bp);
3029
3030                                 break;
3031                         }
3032                 }
3033         } /* if (val & 0x40000000) */
3034
3035         if (j == entry_count) {
3036                 bp->flash_info = NULL;
3037                 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3038                 return -ENODEV;
3039         }
3040
3041         val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3042         val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3043         if (val)
3044                 bp->flash_size = val;
3045         else
3046                 bp->flash_size = bp->flash_info->total_size;
3047
3048         return rc;
3049 }
3050
3051 static int
3052 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3053                 int buf_size)
3054 {
3055         int rc = 0;
3056         u32 cmd_flags, offset32, len32, extra;
3057
3058         if (buf_size == 0)
3059                 return 0;
3060
3061         /* Request access to the flash interface. */
3062         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3063                 return rc;
3064
3065         /* Enable access to flash interface */
3066         bnx2_enable_nvram_access(bp);
3067
3068         len32 = buf_size;
3069         offset32 = offset;
3070         extra = 0;
3071
3072         cmd_flags = 0;
3073
3074         if (offset32 & 3) {
3075                 u8 buf[4];
3076                 u32 pre_len;
3077
3078                 offset32 &= ~3;
3079                 pre_len = 4 - (offset & 3);
3080
3081                 if (pre_len >= len32) {
3082                         pre_len = len32;
3083                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3084                                     BNX2_NVM_COMMAND_LAST;
3085                 }
3086                 else {
3087                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3088                 }
3089
3090                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3091
3092                 if (rc)
3093                         return rc;
3094
3095                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3096
3097                 offset32 += 4;
3098                 ret_buf += pre_len;
3099                 len32 -= pre_len;
3100         }
3101         if (len32 & 3) {
3102                 extra = 4 - (len32 & 3);
3103                 len32 = (len32 + 4) & ~3;
3104         }
3105
3106         if (len32 == 4) {
3107                 u8 buf[4];
3108
3109                 if (cmd_flags)
3110                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3111                 else
3112                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3113                                     BNX2_NVM_COMMAND_LAST;
3114
3115                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3116
3117                 memcpy(ret_buf, buf, 4 - extra);
3118         }
3119         else if (len32 > 0) {
3120                 u8 buf[4];
3121
3122                 /* Read the first word. */
3123                 if (cmd_flags)
3124                         cmd_flags = 0;
3125                 else
3126                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3127
3128                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3129
3130                 /* Advance to the next dword. */
3131                 offset32 += 4;
3132                 ret_buf += 4;
3133                 len32 -= 4;
3134
3135                 while (len32 > 4 && rc == 0) {
3136                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3137
3138                         /* Advance to the next dword. */
3139                         offset32 += 4;
3140                         ret_buf += 4;
3141                         len32 -= 4;
3142                 }
3143
3144                 if (rc)
3145                         return rc;
3146
3147                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3148                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3149
3150                 memcpy(ret_buf, buf, 4 - extra);
3151         }
3152
3153         /* Disable access to flash interface */
3154         bnx2_disable_nvram_access(bp);
3155
3156         bnx2_release_nvram_lock(bp);
3157
3158         return rc;
3159 }
3160
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Writes must be dword-aligned, so unaligned head/tail bytes are merged
 * with the existing NVRAM contents (read into start[]/end[], combined
 * into a temporary align_buf).  Non-buffered flash can only be written
 * a full page at a time: each affected page is read into flash_buffer,
 * erased, and rewritten with the untouched parts restored around the
 * new data.  The NVRAM lock is acquired and released per page so other
 * agents can interleave accesses between pages.
 *
 * NOTE(review): the error paths inside the per-page loop jump straight
 * to nvram_write_end while the NVRAM lock is still held and flash
 * access/write is still enabled — the lock is never released on
 * failure.  Confirm whether that is acceptable or needs cleanup.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: back up to the dword boundary and fetch the
	 * dword we will partially overwrite. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: fetch the trailing dword as well. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned copy of the data with the preserved head and
	 * tail bytes merged in. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized bounce buffer for the
	 * read-erase-rewrite cycle.  NOTE(review): 264 presumably covers
	 * the largest non-buffered page size — confirm against
	 * flash_table. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* The LAST flag goes on the final dword of the
			 * page, or of the transfer for buffered flash. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3340
3341 static int
3342 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3343 {
3344         u32 val;
3345         int i, rc = 0;
3346
3347         /* Wait for the current PCI transaction to complete before
3348          * issuing a reset. */
3349         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3350                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3351                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3352                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3353                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3354         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3355         udelay(5);
3356
3357         /* Wait for the firmware to tell us it is ok to issue a reset. */
3358         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3359
3360         /* Deposit a driver reset signature so the firmware knows that
3361          * this is a soft reset. */
3362         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3363                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3364
3365         /* Do a dummy read to force the chip to complete all current transaction
3366          * before we issue a reset. */
3367         val = REG_RD(bp, BNX2_MISC_ID);
3368
3369         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3370                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3371                 REG_RD(bp, BNX2_MISC_COMMAND);
3372                 udelay(5);
3373
3374                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3375                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3376
3377                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3378
3379         } else {
3380                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3381                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3382                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3383
3384                 /* Chip reset. */
3385                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3386
3387                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3388                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3389                         current->state = TASK_UNINTERRUPTIBLE;
3390                         schedule_timeout(HZ / 50);
3391                 }
3392
3393                 /* Reset takes approximate 30 usec */
3394                 for (i = 0; i < 10; i++) {
3395                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3396                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3397                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3398                                 break;
3399                         udelay(10);
3400                 }
3401
3402                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3403                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3404                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3405                         return -EBUSY;
3406                 }
3407         }
3408
3409         /* Make sure byte swapping is properly configured. */
3410         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3411         if (val != 0x01020304) {
3412                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3413                 return -ENODEV;
3414         }
3415
3416         /* Wait for the firmware to finish its initialization. */
3417         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3418         if (rc)
3419                 return rc;
3420
3421         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3422                 /* Adjust the voltage regular to two steps lower.  The default
3423                  * of this register is 0x0000000e. */
3424                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3425
3426                 /* Remove bad rbuf memory from the free pool. */
3427                 rc = bnx2_alloc_bad_rbuf(bp);
3428         }
3429
3430         return rc;
3431 }
3432
/* One-time hardware initialization performed after a chip reset:
 * configures DMA swapping/channels, contexts, on-chip CPU firmware,
 * MAC address, MTU and the host coalescing block, then signals the
 * bootcode that driver initialization is complete.
 *
 * Returns 0 on success, or the error from bnx2_init_cpus() /
 * bnx2_fw_sync().
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val;
        int rc;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* DMA byte/word swap modes plus the read/write channel counts
         * (bits 12-15 and 16-19 respectively). */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        /* NOTE(review): bit 23 is only set for 133 MHz PCI-X; exact bit
         * semantics are defined in bnx2.h / chip docs — confirm there. */
        if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        /* Ping-pong DMA only on 5706 (not A0 stepping) on plain PCI. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        /* 5706 A0 workaround: restrict TDMA to a single DMA. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        /* On PCI-X, clear the Enable Relaxed Ordering bit in the
         * PCI-X command register. */
        if (bp->flags & PCIX_FLAG) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_init_5709_context(bp);
        else
                bnx2_init_context(bp);

        /* Load firmware into the on-chip processors. */
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp);

        /* Kernel bypass block size = 256; also disable MQ halt
         * (BNX2_MQ_CONFIG_HALT_DIS) on 5709 A0/A1 steppings. */
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
                val |= BNX2_MQ_CONFIG_HALT_DIS;

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        /* Bypass window covers all kernel contexts. */
        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        /* Tell RV2P the host page size (log2, biased by 8). */
        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the EMAC backoff generator from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        val = bp->dev->mtu + ETH_HLEN + 4;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        bp->last_status_idx = 0;
        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* DMA addresses of the status and statistics blocks
         * (split into low/high 32-bit halves). */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host coalescing parameters: the *_int variants occupy the
         * high 16 bits of each register. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        /* NOTE(review): 5706 A1 gets stats-collection only; timer modes
         * presumably unusable on that stepping — confirm against errata. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
        else {
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
                       BNX2_HC_CONFIG_TX_TMR_MODE |
                       BNX2_HC_CONFIG_COLLECT_STATS);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

        /* Remember whether the bootcode reports ASF management enabled. */
        if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
            BNX2_PORT_FEATURE_ASF_ENABLED)
                bp->flags |= ASF_ENABLE_FLAG;

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        /* Tell the firmware that initialization is complete. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          0);

        /* Enable the remaining internal blocks; the read flushes the write. */
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        /* Cache the HC command register for later COAL_NOW pokes. */
        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
3600
3601 static void
3602 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3603 {
3604         u32 val, offset0, offset1, offset2, offset3;
3605
3606         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3607                 offset0 = BNX2_L2CTX_TYPE_XI;
3608                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3609                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3610                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3611         } else {
3612                 offset0 = BNX2_L2CTX_TYPE;
3613                 offset1 = BNX2_L2CTX_CMD_TYPE;
3614                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3615                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3616         }
3617         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3618         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3619
3620         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3621         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3622
3623         val = (u64) bp->tx_desc_mapping >> 32;
3624         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3625
3626         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3627         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3628 }
3629
3630 static void
3631 bnx2_init_tx_ring(struct bnx2 *bp)
3632 {
3633         struct tx_bd *txbd;
3634         u32 cid;
3635
3636         bp->tx_wake_thresh = bp->tx_ring_size / 2;
3637
3638         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3639
3640         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3641         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3642
3643         bp->tx_prod = 0;
3644         bp->tx_cons = 0;
3645         bp->hw_tx_cons = 0;
3646         bp->tx_prod_bseq = 0;
3647
3648         cid = TX_CID;
3649         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3650         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3651
3652         bnx2_init_tx_context(bp, cid);
3653 }
3654
3655 static void
3656 bnx2_init_rx_ring(struct bnx2 *bp)
3657 {
3658         struct rx_bd *rxbd;
3659         int i;
3660         u16 prod, ring_prod;
3661         u32 val;
3662
3663         /* 8 for CRC and VLAN */
3664         bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3665         /* hw alignment */
3666         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3667
3668         ring_prod = prod = bp->rx_prod = 0;
3669         bp->rx_cons = 0;
3670         bp->hw_rx_cons = 0;
3671         bp->rx_prod_bseq = 0;
3672
3673         for (i = 0; i < bp->rx_max_ring; i++) {
3674                 int j;
3675
3676                 rxbd = &bp->rx_desc_ring[i][0];
3677                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3678                         rxbd->rx_bd_len = bp->rx_buf_use_size;
3679                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3680                 }
3681                 if (i == (bp->rx_max_ring - 1))
3682                         j = 0;
3683                 else
3684                         j = i + 1;
3685                 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3686                 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3687                                        0xffffffff;
3688         }
3689
3690         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3691         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3692         val |= 0x02 << 8;
3693         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3694
3695         val = (u64) bp->rx_desc_mapping[0] >> 32;
3696         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3697
3698         val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3699         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3700
3701         for (i = 0; i < bp->rx_ring_size; i++) {
3702                 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3703                         break;
3704                 }
3705                 prod = NEXT_RX_BD(prod);
3706                 ring_prod = RX_RING_IDX(prod);
3707         }
3708         bp->rx_prod = prod;
3709
3710         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3711
3712         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3713 }
3714
3715 static void
3716 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3717 {
3718         u32 num_rings, max;
3719
3720         bp->rx_ring_size = size;
3721         num_rings = 1;
3722         while (size > MAX_RX_DESC_CNT) {
3723                 size -= MAX_RX_DESC_CNT;
3724                 num_rings++;
3725         }
3726         /* round to next power of 2 */
3727         max = MAX_RX_RINGS;
3728         while ((max & num_rings) == 0)
3729                 max >>= 1;
3730
3731         if (num_rings != max)
3732                 max <<= 1;
3733
3734         bp->rx_max_ring = max;
3735         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3736 }
3737
3738 static void
3739 bnx2_free_tx_skbs(struct bnx2 *bp)
3740 {
3741         int i;
3742
3743         if (bp->tx_buf_ring == NULL)
3744                 return;
3745
3746         for (i = 0; i < TX_DESC_CNT; ) {
3747                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3748                 struct sk_buff *skb = tx_buf->skb;
3749                 int j, last;
3750
3751                 if (skb == NULL) {
3752                         i++;
3753                         continue;
3754                 }
3755
3756                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3757                         skb_headlen(skb), PCI_DMA_TODEVICE);
3758
3759                 tx_buf->skb = NULL;
3760
3761                 last = skb_shinfo(skb)->nr_frags;
3762                 for (j = 0; j < last; j++) {
3763                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3764                         pci_unmap_page(bp->pdev,
3765                                 pci_unmap_addr(tx_buf, mapping),
3766                                 skb_shinfo(skb)->frags[j].size,
3767                                 PCI_DMA_TODEVICE);
3768                 }
3769                 dev_kfree_skb(skb);
3770                 i += j + 1;
3771         }
3772
3773 }
3774
3775 static void
3776 bnx2_free_rx_skbs(struct bnx2 *bp)
3777 {
3778         int i;
3779
3780         if (bp->rx_buf_ring == NULL)
3781                 return;
3782
3783         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3784                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3785                 struct sk_buff *skb = rx_buf->skb;
3786
3787                 if (skb == NULL)
3788                         continue;
3789
3790                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3791                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3792
3793                 rx_buf->skb = NULL;
3794
3795                 dev_kfree_skb(skb);
3796         }
3797 }
3798
/* Release every TX and RX buffer still owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
3805
3806 static int
3807 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3808 {
3809         int rc;
3810
3811         rc = bnx2_reset_chip(bp, reset_code);
3812         bnx2_free_skbs(bp);
3813         if (rc)
3814                 return rc;
3815
3816         if ((rc = bnx2_init_chip(bp)) != 0)
3817                 return rc;
3818
3819         bnx2_init_tx_ring(bp);
3820         bnx2_init_rx_ring(bp);
3821         return 0;
3822 }
3823
3824 static int
3825 bnx2_init_nic(struct bnx2 *bp)
3826 {
3827         int rc;
3828
3829         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3830                 return rc;
3831
3832         spin_lock_bh(&bp->phy_lock);
3833         bnx2_init_phy(bp);
3834         spin_unlock_bh(&bp->phy_lock);
3835         bnx2_set_link(bp);
3836         return 0;
3837 }
3838
/* Register self-test.  For each table entry, write 0 and then all-ones
 * to the register and verify that:
 *   - only bits in rw_mask change (writable bits toggle as written),
 *   - bits in ro_mask keep their original value.
 * Entries flagged BNX2_FL_NOT_5709 are skipped on 5709 chips.
 * The original register value is restored in all cases.
 *
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        /* offset: register offset from BAR; rw_mask: bits expected to be
         * writable; ro_mask: bits expected to be read-only.  Terminated
         * by offset 0xffff. */
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Write zero: rw bits must read back 0, ro bits unchanged. */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all ones: rw bits must all set, ro bits unchanged. */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the original value before reporting failure. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
4009
4010 static int
4011 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4012 {
4013         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4014                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4015         int i;
4016
4017         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4018                 u32 offset;
4019
4020                 for (offset = 0; offset < size; offset += 4) {
4021
4022                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4023
4024                         if (REG_RD_IND(bp, start + offset) !=
4025                                 test_pattern[i]) {
4026                                 return -ENODEV;
4027                         }
4028                 }
4029         }
4030         return 0;
4031 }
4032
4033 static int
4034 bnx2_test_memory(struct bnx2 *bp)
4035 {
4036         int ret = 0;
4037         int i;
4038         static struct mem_entry {
4039                 u32   offset;
4040                 u32   len;
4041         } mem_tbl_5706[] = {
4042                 { 0x60000,  0x4000 },
4043                 { 0xa0000,  0x3000 },
4044                 { 0xe0000,  0x4000 },
4045                 { 0x120000, 0x4000 },
4046                 { 0x1a0000, 0x4000 },
4047                 { 0x160000, 0x4000 },
4048                 { 0xffffffff, 0    },
4049         },
4050         mem_tbl_5709[] = {
4051                 { 0x60000,  0x4000 },
4052                 { 0xa0000,  0x3000 },
4053                 { 0xe0000,  0x4000 },
4054                 { 0x120000, 0x4000 },
4055                 { 0x1a0000, 0x4000 },
4056                 { 0xffffffff, 0    },
4057         };
4058         struct mem_entry *mem_tbl;
4059
4060         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4061                 mem_tbl = mem_tbl_5709;
4062         else
4063                 mem_tbl = mem_tbl_5706;
4064
4065         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4066                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4067                         mem_tbl[i].len)) != 0) {
4068                         return ret;
4069                 }
4070         }
4071
4072         return ret;
4073 }
4074
4075 #define BNX2_MAC_LOOPBACK       0
4076 #define BNX2_PHY_LOOPBACK       1
4077
/* Loopback self-test: send one 1514-byte frame with the chip in MAC or
 * PHY loopback mode and verify that it is received back intact (length
 * and payload match, no frame errors).
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, -ENODEV on any data mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        /* Put the chip into the requested loopback mode. */
        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build the test frame: our MAC as destination, zeroed source,
         * then a deterministic byte pattern for the payload. */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a coalescing pass (without interrupt) so the status
         * block holds the current RX consumer index. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Queue the frame as a single start+end BD and ring the TX
         * doorbell. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        /* Give the frame time to loop back, then refresh the status
         * block again. */
        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The transmit must have been consumed... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ...and exactly num_pkts frames must have been received. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The l2_fhdr precedes the frame data in the receive buffer. */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject the frame on any receive error flag. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length check excludes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern byte-for-byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4196
4197 #define BNX2_MAC_LOOPBACK_FAILED        1
4198 #define BNX2_PHY_LOOPBACK_FAILED        2
4199 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4200                                          BNX2_PHY_LOOPBACK_FAILED)
4201
4202 static int
4203 bnx2_test_loopback(struct bnx2 *bp)
4204 {
4205         int rc = 0;
4206
4207         if (!netif_running(bp->dev))
4208                 return BNX2_LOOPBACK_FAILED;
4209
4210         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4211         spin_lock_bh(&bp->phy_lock);
4212         bnx2_init_phy(bp);
4213         spin_unlock_bh(&bp->phy_lock);
4214         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4215                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4216         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4217                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4218         return rc;
4219 }
4220
4221 #define NVRAM_SIZE 0x200
4222 #define CRC32_RESIDUAL 0xdebb20e3
4223
4224 static int
4225 bnx2_test_nvram(struct bnx2 *bp)
4226 {
4227         u32 buf[NVRAM_SIZE / 4];
4228         u8 *data = (u8 *) buf;
4229         int rc = 0;
4230         u32 magic, csum;
4231
4232         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4233                 goto test_nvram_done;
4234
4235         magic = be32_to_cpu(buf[0]);
4236         if (magic != 0x669955aa) {
4237                 rc = -ENODEV;
4238                 goto test_nvram_done;
4239         }
4240
4241         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4242                 goto test_nvram_done;
4243
4244         csum = ether_crc_le(0x100, data);
4245         if (csum != CRC32_RESIDUAL) {
4246                 rc = -ENODEV;
4247                 goto test_nvram_done;
4248         }
4249
4250         csum = ether_crc_le(0x100, data + 0x100);
4251         if (csum != CRC32_RESIDUAL) {
4252                 rc = -ENODEV;
4253         }
4254
4255 test_nvram_done:
4256         return rc;
4257 }
4258
4259 static int
4260 bnx2_test_link(struct bnx2 *bp)
4261 {
4262         u32 bmsr;
4263
4264         spin_lock_bh(&bp->phy_lock);
4265         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
4266         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
4267         spin_unlock_bh(&bp->phy_lock);
4268
4269         if (bmsr & BMSR_LSTATUS) {
4270                 return 0;
4271         }
4272         return -ENODEV;
4273 }
4274
4275 static int
4276 bnx2_test_intr(struct bnx2 *bp)
4277 {
4278         int i;
4279         u16 status_idx;
4280
4281         if (!netif_running(bp->dev))
4282                 return -ENODEV;
4283
4284         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4285
4286         /* This register is not touched during run-time. */
4287         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4288         REG_RD(bp, BNX2_HC_COMMAND);
4289
4290         for (i = 0; i < 10; i++) {
4291                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4292                         status_idx) {
4293
4294                         break;
4295                 }
4296
4297                 msleep_interruptible(10);
4298         }
4299         if (i < 10)
4300                 return 0;
4301
4302         return -ENODEV;
4303 }
4304
/* Periodic link maintenance for the 5706 SerDes PHY, called from
 * bnx2_timer.  While autoneg has not produced a link, fall back to
 * forced 1 Gb/s when the partner is not autonegotiating ("parallel
 * detect"); once a parallel-detect link partner later starts sending
 * CONFIG, switch back to autonegotiation.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        /* Hold off while a previous autoneg attempt is still pending. */
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Vendor-specific PHY registers; presumably
                         * expose signal-detect and config-exchange
                         * status (register addresses/values from
                         * Broadcom reference code) — confirm against
                         * the 5706 datasheet.
                         */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner is not autonegotiating:
                                 * force 1000 Mb/s full duplex.
                                 */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Link is up via parallel detect; if the partner now
                 * sends CONFIG, re-enable autonegotiation.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4359
/* Periodic link maintenance for the 5708 SerDes PHY, called from
 * bnx2_timer.  While no link is up, alternate between autonegotiation
 * and forced 2.5 Gb/s so either kind of link partner can be found.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* Nothing to do unless the PHY is 2.5G-capable. */
        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        /* Hold off while a previous autoneg attempt is still pending. */
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg found nothing; try forced 2.5G with
                         * a shorter retry interval.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced mode found nothing; return to autoneg
                         * and give it two timer ticks before retrying.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4389
/* Driver heartbeat timer: writes the periodic pulse the bootcode
 * firmware uses to detect a live driver, refreshes the firmware RX
 * drop counter in the stats block, runs per-chip SerDes link
 * maintenance, and re-arms itself.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;
        u32 msg;

        if (!netif_running(bp->dev))
                return;

        /* Skip the body while interrupts are held off (intr_sem != 0,
         * e.g. during a reset); just keep the timer running.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Heartbeat pulse to the firmware via the shared-memory mailbox. */
        msg = (u32) ++bp->fw_drv_pulse_wr_seq;
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

        bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4417
/* Called with rtnl_lock */
/* netdev open hook: power up the chip, allocate DMA memory, request
 * the interrupt (preferring MSI where the silicon supports it),
 * initialize the NIC, verify that MSI actually delivers interrupts
 * (falling back to INTx if not), and start the TX queue.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        /* MSI is skipped on 5706 A0/A1 silicon and when disabled via
         * the module parameter; if pci_enable_msi fails we fall back
         * to shared INTx.
         */
        if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
                (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
                !disable_msi) {

                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
                                        dev);
                }
                else {
                        rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                }
        }
        else {
                rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
                                dev->name, dev);
        }
        if (rc) {
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                /* Unwind IRQ/MSI setup and allocations on init failure. */
                free_irq(bp->pdev->irq, dev);
                if (bp->flags & USING_MSI_FLAG) {
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        free_irq(bp->pdev->irq, dev);
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;

                        /* Re-init the NIC so it runs with INTx. */
                        rc = bnx2_init_nic(bp);

                        if (!rc) {
                                rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                        }
                        if (rc) {
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
4515
/* Work-queue handler (scheduled by bnx2_tx_timeout) that stops the
 * interface and re-initializes the NIC.  in_reset_task lets
 * bnx2_close wait for completion instead of flushing the workqueue
 * (see the deadlock comment in bnx2_close).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bp->in_reset_task = 1;
        bnx2_netif_stop(bp);

        bnx2_init_nic(bp);

        /* NOTE(review): intr_sem is set to 1 here; presumably
         * bnx2_netif_start drops it and re-enables interrupts — confirm.
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        bp->in_reset_task = 0;
}
4533
/* netdev tx_timeout hook: defer the actual chip reset to process
 * context via the reset work item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
4542
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Attach (or detach, with vlgrp == NULL) the VLAN group and reprogram
 * the RX filters while traffic is quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
4557
/* Called with rtnl_lock */
/* Remove one VLAN id from the group and refresh the RX filters while
 * traffic is quiesced.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);
        vlan_group_set_device(bp->vlgrp, vid, NULL);
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
#endif
4571
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* netdev hard_start_xmit hook: map the skb (linear part plus page
 * fragments) into TX buffer descriptors, encode checksum/VLAN/TSO
 * flags, and ring the TX doorbell.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;

        /* The queue should have been stopped before the ring could
         * fill; reaching this indicates broken stop/wake thresholds.
         */
        if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = bp->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        /* TSO: encode the MSS and TCP/IP header lengths into the BD. */
        if ((mss = skb_shinfo(skb)->gso_size) &&
                (skb->len > (bp->dev->mtu + ETH_HLEN))) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* IPv6 TSO: the extra TCP header offset beyond
                         * the basic IPv6 header is split across several
                         * BD flag fields (3 bits dropped, remainder
                         * packed into OFF0/OFF2/OFF4 positions).
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4 TSO: headers are rewritten in place, so
                         * a cloned header area must be un-shared first.
                         */
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Seed per-segment tot_len and the TCP
                         * pseudo-header checksum for the hardware.
                         */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &bp->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &bp->tx_desc_ring[ring_prod];

        /* First BD covers the linear part of the skb. */
        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One additional BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &bp->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        bp->tx_prod_bseq += skb->len;

        /* Ring the doorbell: publish the new producer index and
         * cumulative byte count to the chip.
         */
        REG_WR16(bp, bp->tx_bidx_addr, prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        mmiowb();

        bp->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue when it can no longer hold a maximally
         * fragmented packet; re-check availability afterwards to close
         * the race with a concurrent bnx2_tx_int completion.
         */
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
4711
/* Called with rtnl_lock */
/* netdev stop hook: wait out a running reset task, stop traffic and
 * the heartbeat timer, put the chip into the reset state matching the
 * WoL configuration, and release the IRQ, buffers and DMA memory.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        /* Select the firmware reset code based on Wake-on-LAN state. */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        free_irq(bp->pdev->irq, dev);
        if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
4747
/* Combine the hi/lo 32-bit halves of a 64-bit statistics counter.  On
 * 64-bit hosts the full value fits in unsigned long; on 32-bit hosts
 * only the low half is reported.  The entire expansion is now
 * parenthesized so the macros compose safely inside larger expressions
 * (previously `x * GET_NET_STATS64(c)` would bind only to the hi term).
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4760
/* netdev get_stats hook: translate the chip's DMA'd statistics block
 * into the generic net_device_stats structure.  Returns the (possibly
 * stale) cached structure if the stats block is not yet allocated.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        /* Stats block is only allocated while the device is open. */
        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        /* rx_errors is the sum of the individual RX error classes. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* NOTE(review): carrier-sense errors are forced to 0 on 5706
         * and 5708 A0 — presumably a hardware erratum; confirm.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Include frames the firmware dropped before the driver saw them. */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
4836
4837 /* All ethtool functions called with rtnl_lock */
4838
4839 static int
4840 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4841 {
4842         struct bnx2 *bp = netdev_priv(dev);
4843
4844         cmd->supported = SUPPORTED_Autoneg;
4845         if (bp->phy_flags & PHY_SERDES_FLAG) {
4846                 cmd->supported |= SUPPORTED_1000baseT_Full |
4847                         SUPPORTED_FIBRE;
4848                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
4849                         cmd->supported |= SUPPORTED_2500baseX_Full;
4850
4851                 cmd->port = PORT_FIBRE;
4852         }
4853         else {
4854                 cmd->supported |= SUPPORTED_10baseT_Half |
4855                         SUPPORTED_10baseT_Full |
4856                         SUPPORTED_100baseT_Half |
4857                         SUPPORTED_100baseT_Full |
4858                         SUPPORTED_1000baseT_Full |
4859                         SUPPORTED_TP;
4860
4861                 cmd->port = PORT_TP;
4862         }
4863
4864         cmd->advertising = bp->advertising;
4865
4866         if (bp->autoneg & AUTONEG_SPEED) {
4867                 cmd->autoneg = AUTONEG_ENABLE;
4868         }
4869         else {
4870                 cmd->autoneg = AUTONEG_DISABLE;
4871         }
4872
4873         if (netif_carrier_ok(dev)) {
4874                 cmd->speed = bp->line_speed;
4875                 cmd->duplex = bp->duplex;
4876         }
4877         else {
4878                 cmd->speed = -1;
4879                 cmd->duplex = -1;
4880         }
4881
4882         cmd->transceiver = XCVR_INTERNAL;
4883         cmd->phy_address = bp->phy_addr;
4884
4885         return 0;
4886 }
4887
/* ethtool set_settings: validate the requested autoneg/speed/duplex
 * combination and apply it to the PHY.  Settings are staged in locals
 * so nothing is committed if a validation check fails.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only. */
                        if (bp->phy_flags & PHY_SERDES_FLAG)
                                return -EINVAL;

                        advertising = cmd->advertising;

                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
                        advertising = cmd->advertising;
                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
                        /* 1000 half duplex is not supported. */
                        return -EINVAL;
                }
                else {
                        /* Anything else: advertise every supported speed. */
                        if (bp->phy_flags & PHY_SERDES_FLAG) {
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        }
                        else {
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                        }
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        /* SerDes supports only 1000/2500 full duplex,
                         * and 2500 only on 2.5G-capable PHYs.
                         */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                return -EINVAL;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                return -EINVAL;
                }
                else if (cmd->speed == SPEED_1000) {
                        /* Forced 1000 is not allowed on copper. */
                        return -EINVAL;
                }
                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed: commit and reprogram the PHY. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        spin_lock_bh(&bp->phy_lock);

        bnx2_setup_phy(bp);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4963
/* ethtool get_drvinfo: report driver identity and format the bootcode
 * firmware version (one byte per component) as "x.y.z".
 * NOTE(review): each component is rendered as ('0' + byte), which
 * assumes every byte is in 0..9; larger values would yield non-digit
 * characters — confirm the fw_ver encoding.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct bnx2 *bp = netdev_priv(dev);

        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
        strcpy(info->bus_info, pci_name(bp->pdev));
        info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
        info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
        info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
        info->fw_version[1] = info->fw_version[3] = '.';
        info->fw_version[5] = 0;
}
4978
/* Size in bytes of the register dump produced by bnx2_get_regs. */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len hook. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
4986
4987 static void
4988 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4989 {
4990         u32 *p = _p, i, offset;
4991         u8 *orig_p = _p;
4992         struct bnx2 *bp = netdev_priv(dev);
4993         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4994                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4995                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4996                                  0x1040, 0x1048, 0x1080, 0x10a4,
4997                                  0x1400, 0x1490, 0x1498, 0x14f0,
4998                                  0x1500, 0x155c, 0x1580, 0x15dc,
4999                                  0x1600, 0x1658, 0x1680, 0x16d8,
5000                                  0x1800, 0x1820, 0x1840, 0x1854,
5001                                  0x1880, 0x1894, 0x1900, 0x1984,
5002                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5003                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5004                                  0x2000, 0x2030, 0x23c0, 0x2400,
5005                                  0x2800, 0x2820, 0x2830, 0x2850,
5006                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5007                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5008                                  0x4080, 0x4090, 0x43c0, 0x4458,
5009                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5010                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5011                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5012                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5013                                  0x6800, 0x6848, 0x684c, 0x6860,
5014                                  0x6888, 0x6910, 0x8000 };
5015
5016         regs->version = 0;
5017
5018         memset(p, 0, BNX2_REGDUMP_LEN);
5019
5020         if (!netif_running(bp->dev))
5021                 return;
5022
5023         i = 0;
5024         offset = reg_boundaries[0];
5025         p += offset;
5026         while (offset < BNX2_REGDUMP_LEN) {
5027                 *p++ = REG_RD(bp, offset);
5028                 offset += 4;
5029                 if (offset == reg_boundaries[i + 1]) {
5030                         offset = reg_boundaries[i + 2];
5031                         p = (u32 *) (orig_p + offset);
5032                         i += 2;
5033                 }
5034         }
5035 }
5036
5037 static void
5038 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5039 {
5040         struct bnx2 *bp = netdev_priv(dev);
5041
5042         if (bp->flags & NO_WOL_FLAG) {
5043                 wol->supported = 0;
5044                 wol->wolopts = 0;
5045         }
5046         else {
5047                 wol->supported = WAKE_MAGIC;
5048                 if (bp->wol)
5049                         wol->wolopts = WAKE_MAGIC;
5050                 else
5051                         wol->wolopts = 0;
5052         }
5053         memset(&wol->sopass, 0, sizeof(wol->sopass));
5054 }
5055
5056 static int
5057 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5058 {
5059         struct bnx2 *bp = netdev_priv(dev);
5060
5061         if (wol->wolopts & ~WAKE_MAGIC)
5062                 return -EINVAL;
5063
5064         if (wol->wolopts & WAKE_MAGIC) {
5065                 if (bp->flags & NO_WOL_FLAG)
5066                         return -EINVAL;
5067
5068                 bp->wol = 1;
5069         }
5070         else {
5071                 bp->wol = 0;
5072         }
5073         return 0;
5074 }
5075
/* ethtool nway_reset: restart autonegotiation.  For SerDes PHYs the
 * link is first forced down via loopback so the partner sees the
 * restart, and the SerDes timer is re-armed for the autoneg window.
 * Only valid while autonegotiation is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; msleep cannot be
                 * called with a spinlock held.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
5110
5111 static int
5112 bnx2_get_eeprom_len(struct net_device *dev)
5113 {
5114         struct bnx2 *bp = netdev_priv(dev);
5115
5116         if (bp->flash_info == NULL)
5117                 return 0;
5118
5119         return (int) bp->flash_size;
5120 }
5121
5122 static int
5123 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5124                 u8 *eebuf)
5125 {
5126         struct bnx2 *bp = netdev_priv(dev);
5127         int rc;
5128
5129         /* parameters already validated in ethtool_get_eeprom */
5130
5131         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5132
5133         return rc;
5134 }
5135
5136 static int
5137 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5138                 u8 *eebuf)
5139 {
5140         struct bnx2 *bp = netdev_priv(dev);
5141         int rc;
5142
5143         /* parameters already validated in ethtool_set_eeprom */
5144
5145         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5146
5147         return rc;
5148 }
5149
5150 static int
5151 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5152 {
5153         struct bnx2 *bp = netdev_priv(dev);
5154
5155         memset(coal, 0, sizeof(struct ethtool_coalesce));
5156
5157         coal->rx_coalesce_usecs = bp->rx_ticks;
5158         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5159         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5160         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5161
5162         coal->tx_coalesce_usecs = bp->tx_ticks;
5163         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5164         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5165         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5166
5167         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5168
5169         return 0;
5170 }
5171
5172 static int
5173 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5174 {
5175         struct bnx2 *bp = netdev_priv(dev);
5176
5177         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5178         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5179
5180         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5181         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5182
5183         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5184         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5185
5186         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5187         if (bp->rx_quick_cons_trip_int > 0xff)
5188                 bp->rx_quick_cons_trip_int = 0xff;
5189
5190         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5191         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5192
5193         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5194         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5195
5196         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5197         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5198
5199         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5200         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5201                 0xff;
5202
5203         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5204         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5205         bp->stats_ticks &= 0xffff00;
5206
5207         if (netif_running(bp->dev)) {
5208                 bnx2_netif_stop(bp);
5209                 bnx2_init_nic(bp);
5210                 bnx2_netif_start(bp);
5211         }
5212
5213         return 0;
5214 }
5215
5216 static void
5217 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5218 {
5219         struct bnx2 *bp = netdev_priv(dev);
5220
5221         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5222         ering->rx_mini_max_pending = 0;
5223         ering->rx_jumbo_max_pending = 0;
5224
5225         ering->rx_pending = bp->rx_ring_size;
5226         ering->rx_mini_pending = 0;
5227         ering->rx_jumbo_pending = 0;
5228
5229         ering->tx_max_pending = MAX_TX_DESC_CNT;
5230         ering->tx_pending = bp->tx_ring_size;
5231 }
5232
/* ethtool set_ringparam: resize the RX and TX rings.  The TX ring must
 * have more than MAX_SKB_FRAGS entries so a maximally-fragmented skb
 * always fits.  If the NIC is running it is stopped, its rings freed,
 * and then re-allocated at the new size.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): if the re-allocation fails we return with the
		 * interface still marked running but the chip reset and all
		 * rings freed — the device is effectively dead until the next
		 * successful reconfiguration.  Confirm whether the interface
		 * should be closed on this path.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5266
5267 static void
5268 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5269 {
5270         struct bnx2 *bp = netdev_priv(dev);
5271
5272         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5273         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5274         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5275 }
5276
5277 static int
5278 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5279 {
5280         struct bnx2 *bp = netdev_priv(dev);
5281
5282         bp->req_flow_ctrl = 0;
5283         if (epause->rx_pause)
5284                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5285         if (epause->tx_pause)
5286                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5287
5288         if (epause->autoneg) {
5289                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5290         }
5291         else {
5292                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5293         }
5294
5295         spin_lock_bh(&bp->phy_lock);
5296
5297         bnx2_setup_phy(bp);
5298
5299         spin_unlock_bh(&bp->phy_lock);
5300
5301         return 0;
5302 }
5303
5304 static u32
5305 bnx2_get_rx_csum(struct net_device *dev)
5306 {
5307         struct bnx2 *bp = netdev_priv(dev);
5308
5309         return bp->rx_csum;
5310 }
5311
5312 static int
5313 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5314 {
5315         struct bnx2 *bp = netdev_priv(dev);
5316
5317         bp->rx_csum = data;
5318         return 0;
5319 }
5320
5321 static int
5322 bnx2_set_tso(struct net_device *dev, u32 data)
5323 {
5324         struct bnx2 *bp = netdev_priv(dev);
5325
5326         if (data) {
5327                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5328                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5329                         dev->features |= NETIF_F_TSO6;
5330         } else
5331                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5332                                    NETIF_F_TSO_ECN);
5333         return 0;
5334 }
5335
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  The order here must match
 * bnx2_stats_offset_arr and the per-chip counter-width tables
 * (bnx2_5706_stats_len_arr / bnx2_5708_stats_len_arr) below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5388
/* Convert a statistics_block member offset to a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset into the hardware statistics block for each counter in
 * bnx2_stats_str_arr.  For 64-bit counters this points at the _hi
 * word; the _lo word is assumed to follow immediately.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5439
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (0 = skip, 4 = 32-bit, 8 = 64-bit) for
 * 5706 A0-A2 and 5708 A0 chips; indexed like bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Counter widths for all later chips; only stat_IfHCInBadOctets is
 * skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5458
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; index order must match the buf[] slots
 * filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5471
/* ethtool self_test_count: number of self-test result slots. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5477
/* ethtool self-test handler.  In offline mode the chip is reset into
 * diagnostic state and the register, memory and loopback tests run;
 * the NIC is then re-initialized (if it was up) and we wait up to 7
 * seconds for link to return.  The NVRAM, interrupt and link tests
 * always run.  Each buf[] slot is non-zero when the corresponding
 * test in bnx2_tests_str_arr failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback test returns a bitmask of failed loopback modes */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5533
5534 static void
5535 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5536 {
5537         switch (stringset) {
5538         case ETH_SS_STATS:
5539                 memcpy(buf, bnx2_stats_str_arr,
5540                         sizeof(bnx2_stats_str_arr));
5541                 break;
5542         case ETH_SS_TEST:
5543                 memcpy(buf, bnx2_tests_str_arr,
5544                         sizeof(bnx2_tests_str_arr));
5545                 break;
5546         }
5547 }
5548
/* ethtool get_stats_count: number of statistics counters reported. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5554
5555 static void
5556 bnx2_get_ethtool_stats(struct net_device *dev,
5557                 struct ethtool_stats *stats, u64 *buf)
5558 {
5559         struct bnx2 *bp = netdev_priv(dev);
5560         int i;
5561         u32 *hw_stats = (u32 *) bp->stats_blk;
5562         u8 *stats_len_arr = NULL;
5563
5564         if (hw_stats == NULL) {
5565                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5566                 return;
5567         }
5568
5569         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5570             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5571             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5572             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5573                 stats_len_arr = bnx2_5706_stats_len_arr;
5574         else
5575                 stats_len_arr = bnx2_5708_stats_len_arr;
5576
5577         for (i = 0; i < BNX2_NUM_STATS; i++) {
5578                 if (stats_len_arr[i] == 0) {
5579                         /* skip this counter */
5580                         buf[i] = 0;
5581                         continue;
5582                 }
5583                 if (stats_len_arr[i] == 4) {
5584                         /* 4-byte counter */
5585                         buf[i] = (u64)
5586                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5587                         continue;
5588                 }
5589                 /* 8-byte counter */
5590                 buf[i] = (((u64) *(hw_stats +
5591                                         bnx2_stats_offset_arr[i])) << 32) +
5592                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5593         }
5594 }
5595
/* ethtool phys_id handler: blink the port LED for 'data' seconds
 * (2 seconds if 0 is passed) so the physical port can be located.
 * The LED mode register is saved on entry and restored on exit; the
 * loop exits early if the sleeping task receives a signal.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Toggle between all-LEDs-forced-on and LED override every
	 * half second, i.e. one blink per second for 'data' seconds.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	/* Restore normal LED operation and the saved LED mode. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5629
5630 static int
5631 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5632 {
5633         struct bnx2 *bp = netdev_priv(dev);
5634
5635         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5636                 return (ethtool_op_set_tx_hw_csum(dev, data));
5637         else
5638                 return (ethtool_op_set_tx_csum(dev, data));
5639 }
5640
/* ethtool operations table registered on the net_device at probe time. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5676
/* Called with rtnl_lock */
/* MII ioctl handler: supports SIOCGMIIPHY (report PHY address),
 * SIOCGMIIREG (read a PHY register) and SIOCSMIIREG (write a PHY
 * register, CAP_NET_ADMIN required).  PHY access needs the device
 * running and is serialized by phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5724
5725 /* Called with rtnl_lock */
5726 static int
5727 bnx2_change_mac_addr(struct net_device *dev, void *p)
5728 {
5729         struct sockaddr *addr = p;
5730         struct bnx2 *bp = netdev_priv(dev);
5731
5732         if (!is_valid_ether_addr(addr->sa_data))
5733                 return -EINVAL;
5734
5735         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5736         if (netif_running(dev))
5737                 bnx2_set_mac_addr(bp);
5738
5739         return 0;
5740 }
5741
5742 /* Called with rtnl_lock */
5743 static int
5744 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5745 {
5746         struct bnx2 *bp = netdev_priv(dev);
5747
5748         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5749                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5750                 return -EINVAL;
5751
5752         dev->mtu = new_mtu;
5753         if (netif_running(dev)) {
5754                 bnx2_netif_stop(bp);
5755
5756                 bnx2_init_nic(bp);
5757
5758                 bnx2_netif_start(bp);
5759         }
5760         return 0;
5761 }
5762
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: run the interrupt handler synchronously with
 * the device IRQ masked, for netconsole/kgdb-style polled operation.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5774
5775 static void __devinit
5776 bnx2_get_5709_media(struct bnx2 *bp)
5777 {
5778         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5779         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5780         u32 strap;
5781
5782         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5783                 return;
5784         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5785                 bp->phy_flags |= PHY_SERDES_FLAG;
5786                 return;
5787         }
5788
5789         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5790                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5791         else
5792                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5793
5794         if (PCI_FUNC(bp->pdev->devfn) == 0) {
5795                 switch (strap) {
5796                 case 0x4:
5797                 case 0x5:
5798                 case 0x6:
5799                         bp->phy_flags |= PHY_SERDES_FLAG;
5800                         return;
5801                 }
5802         } else {
5803                 switch (strap) {
5804                 case 0x1:
5805                 case 0x2:
5806                 case 0x4:
5807                         bp->phy_flags |= PHY_SERDES_FLAG;
5808                         return;
5809                 }
5810         }
5811 }
5812
5813 static int __devinit
5814 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5815 {
5816         struct bnx2 *bp;
5817         unsigned long mem_len;
5818         int rc;
5819         u32 reg;
5820         u64 dma_mask, persist_dma_mask;
5821
5822         SET_MODULE_OWNER(dev);
5823         SET_NETDEV_DEV(dev, &pdev->dev);
5824         bp = netdev_priv(dev);
5825
5826         bp->flags = 0;
5827         bp->phy_flags = 0;
5828
5829         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5830         rc = pci_enable_device(pdev);
5831         if (rc) {
5832                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5833                 goto err_out;
5834         }
5835
5836         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5837                 dev_err(&pdev->dev,
5838                         "Cannot find PCI device base address, aborting.\n");
5839                 rc = -ENODEV;
5840                 goto err_out_disable;
5841         }
5842
5843         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5844         if (rc) {
5845                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5846                 goto err_out_disable;
5847         }
5848
5849         pci_set_master(pdev);
5850
5851         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5852         if (bp->pm_cap == 0) {
5853                 dev_err(&pdev->dev,
5854                         "Cannot find power management capability, aborting.\n");
5855                 rc = -EIO;
5856                 goto err_out_release;
5857         }
5858
5859         bp->dev = dev;
5860         bp->pdev = pdev;
5861
5862         spin_lock_init(&bp->phy_lock);
5863         INIT_WORK(&bp->reset_task, bnx2_reset_task);
5864
5865         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5866         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5867         dev->mem_end = dev->mem_start + mem_len;
5868         dev->irq = pdev->irq;
5869
5870         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5871
5872         if (!bp->regview) {
5873                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5874                 rc = -ENOMEM;
5875                 goto err_out_release;
5876         }
5877
5878         /* Configure byte swap and enable write to the reg_window registers.
5879          * Rely on CPU to do target byte swapping on big endian systems
5880          * The chip's target access swapping will not swap all accesses
5881          */
5882         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5883                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5884                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5885
5886         bnx2_set_power_state(bp, PCI_D0);
5887
5888         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5889
5890         if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5891                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5892                 if (bp->pcix_cap == 0) {
5893                         dev_err(&pdev->dev,
5894                                 "Cannot find PCIX capability, aborting.\n");
5895                         rc = -EIO;
5896                         goto err_out_unmap;
5897                 }
5898         }
5899
5900         /* 5708 cannot support DMA addresses > 40-bit.  */
5901         if (CHIP_NUM(bp) == CHIP_NUM_5708)
5902                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
5903         else
5904                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
5905
5906         /* Configure DMA attributes. */
5907         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
5908                 dev->features |= NETIF_F_HIGHDMA;
5909                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
5910                 if (rc) {
5911                         dev_err(&pdev->dev,
5912                                 "pci_set_consistent_dma_mask failed, aborting.\n");
5913                         goto err_out_unmap;
5914                 }
5915         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
5916                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5917                 goto err_out_unmap;
5918         }
5919
5920         /* Get bus information. */
5921         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5922         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5923                 u32 clkreg;
5924
5925                 bp->flags |= PCIX_FLAG;
5926
5927                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5928
5929                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5930                 switch (clkreg) {
5931                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5932                         bp->bus_speed_mhz = 133;
5933                         break;
5934
5935                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5936                         bp->bus_speed_mhz = 100;
5937                         break;
5938
5939                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5940                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5941                         bp->bus_speed_mhz = 66;
5942                         break;
5943
5944                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5945                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5946                         bp->bus_speed_mhz = 50;
5947                         break;
5948
5949                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5950                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5951                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5952                         bp->bus_speed_mhz = 33;
5953                         break;
5954                 }
5955         }
5956         else {
5957                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5958                         bp->bus_speed_mhz = 66;
5959                 else
5960                         bp->bus_speed_mhz = 33;
5961         }
5962
5963         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5964                 bp->flags |= PCI_32BIT_FLAG;
5965
5966         /* 5706A0 may falsely detect SERR and PERR. */
5967         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5968                 reg = REG_RD(bp, PCI_COMMAND);
5969                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5970                 REG_WR(bp, PCI_COMMAND, reg);
5971         }
5972         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5973                 !(bp->flags & PCIX_FLAG)) {
5974
5975                 dev_err(&pdev->dev,
5976                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
5977                 goto err_out_unmap;
5978         }
5979
5980         bnx2_init_nvram(bp);
5981
5982         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5983
5984         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5985             BNX2_SHM_HDR_SIGNATURE_SIG) {
5986                 u32 off = PCI_FUNC(pdev->devfn) << 2;
5987
5988                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5989         } else
5990                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5991
5992         /* Get the permanent MAC address.  First we need to make sure the
5993          * firmware is actually running.
5994          */
5995         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5996
5997         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5998             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5999                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6000                 rc = -ENODEV;
6001                 goto err_out_unmap;
6002         }
6003
6004         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6005
6006         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6007         bp->mac_addr[0] = (u8) (reg >> 8);
6008         bp->mac_addr[1] = (u8) reg;
6009
6010         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6011         bp->mac_addr[2] = (u8) (reg >> 24);
6012         bp->mac_addr[3] = (u8) (reg >> 16);
6013         bp->mac_addr[4] = (u8) (reg >> 8);
6014         bp->mac_addr[5] = (u8) reg;
6015
6016         bp->tx_ring_size = MAX_TX_DESC_CNT;
6017         bnx2_set_rx_ring_size(bp, 255);
6018
6019         bp->rx_csum = 1;
6020
6021         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6022
6023         bp->tx_quick_cons_trip_int = 20;
6024         bp->tx_quick_cons_trip = 20;
6025         bp->tx_ticks_int = 80;
6026         bp->tx_ticks = 80;
6027
6028         bp->rx_quick_cons_trip_int = 6;
6029         bp->rx_quick_cons_trip = 6;
6030         bp->rx_ticks_int = 18;
6031         bp->rx_ticks = 18;
6032
6033         bp->stats_ticks = 1000000 & 0xffff00;
6034
6035         bp->timer_interval =  HZ;
6036         bp->current_interval =  HZ;
6037
6038         bp->phy_addr = 1;
6039
6040         /* Disable WOL support if we are running on a SERDES chip. */
6041         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6042                 bnx2_get_5709_media(bp);
6043         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6044                 bp->phy_flags |= PHY_SERDES_FLAG;
6045
6046         if (bp->phy_flags & PHY_SERDES_FLAG) {
6047                 bp->flags |= NO_WOL_FLAG;
6048                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6049                         bp->phy_addr = 2;
6050                         reg = REG_RD_IND(bp, bp->shmem_base +
6051                                          BNX2_SHARED_HW_CFG_CONFIG);
6052                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6053                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6054                 }
6055         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6056                    CHIP_NUM(bp) == CHIP_NUM_5708)
6057                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6058         else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6059                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6060
6061         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6062             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6063             (CHIP_ID(bp) == CHIP_ID_5708_B1))
6064                 bp->flags |= NO_WOL_FLAG;
6065
6066         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6067                 bp->tx_quick_cons_trip_int =
6068                         bp->tx_quick_cons_trip;
6069                 bp->tx_ticks_int = bp->tx_ticks;
6070                 bp->rx_quick_cons_trip_int =
6071                         bp->rx_quick_cons_trip;
6072                 bp->rx_ticks_int = bp->rx_ticks;
6073                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6074                 bp->com_ticks_int = bp->com_ticks;
6075                 bp->cmd_ticks_int = bp->cmd_ticks;
6076         }
6077
6078         /* Disable MSI on 5706 if AMD 8132 bridge is found.
6079          *
6080          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
6081          * with byte enables disabled on the unused 32-bit word.  This is legal
6082          * but causes problems on the AMD 8132 which will eventually stop
6083          * responding after a while.
6084          *
6085          * AMD believes this incompatibility is unique to the 5706, and
6086          * prefers to locally disable MSI rather than globally disabling it.
6087          */
6088         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6089                 struct pci_dev *amd_8132 = NULL;
6090
6091                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6092                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
6093                                                   amd_8132))) {
6094                         u8 rev;
6095
6096                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6097                         if (rev >= 0x10 && rev <= 0x13) {
6098                                 disable_msi = 1;
6099                                 pci_dev_put(amd_8132);
6100                                 break;
6101                         }
6102                 }
6103         }
6104
6105         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6106         bp->req_line_speed = 0;
6107         if (bp->phy_flags & PHY_SERDES_FLAG) {
6108                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6109
6110                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6111                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6112                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6113                         bp->autoneg = 0;
6114                         bp->req_line_speed = bp->line_speed = SPEED_1000;
6115                         bp->req_duplex = DUPLEX_FULL;
6116                 }
6117         }
6118         else {
6119                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6120         }
6121
6122         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6123
6124         init_timer(&bp->timer);
6125         bp->timer.expires = RUN_AT(bp->timer_interval);
6126         bp->timer.data = (unsigned long) bp;
6127         bp->timer.function = bnx2_timer;
6128
6129         return 0;
6130
6131 err_out_unmap:
6132         if (bp->regview) {
6133                 iounmap(bp->regview);
6134                 bp->regview = NULL;
6135         }
6136
6137 err_out_release:
6138         pci_release_regions(pdev);
6139
6140 err_out_disable:
6141         pci_disable_device(pdev);
6142         pci_set_drvdata(pdev, NULL);
6143
6144 err_out:
6145         return rc;
6146 }
6147
6148 static int __devinit
6149 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6150 {
6151         static int version_printed = 0;
6152         struct net_device *dev = NULL;
6153         struct bnx2 *bp;
6154         int rc, i;
6155
6156         if (version_printed++ == 0)
6157                 printk(KERN_INFO "%s", version);
6158
6159         /* dev zeroed in init_etherdev */
6160         dev = alloc_etherdev(sizeof(*bp));
6161
6162         if (!dev)
6163                 return -ENOMEM;
6164
6165         rc = bnx2_init_board(pdev, dev);
6166         if (rc < 0) {
6167                 free_netdev(dev);
6168                 return rc;
6169         }
6170
6171         dev->open = bnx2_open;
6172         dev->hard_start_xmit = bnx2_start_xmit;
6173         dev->stop = bnx2_close;
6174         dev->get_stats = bnx2_get_stats;
6175         dev->set_multicast_list = bnx2_set_rx_mode;
6176         dev->do_ioctl = bnx2_ioctl;
6177        &nb