[BNX2]: Add 40-bit DMA workaround for 5708.
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Driver identity and module-wide constants. */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* prefix for printk messages */
#define DRV_MODULE_VERSION      "1.5.8"
#define DRV_MODULE_RELDATE      "April 24, 2007"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board identifiers.  Each value is stored as driver_data in
 * bnx2_pci_tbl and used as an index into board_info[] below, so the
 * ordering here must stay in sync with that table.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
} board_t;
88
/* indexed by board_t, above */
/* Human-readable board names; entry order must match the board_t enum. */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        };
102
/* PCI device match table.  HP OEM entries (matched by subsystem vendor
 * 0x103c + subsystem id) must precede the catch-all PCI_ANY_ID entries
 * for the same device id, since the PCI core matches in table order.
 * driver_data carries the board_t index into board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { 0, }
};
122
/* NVRAM device descriptors.  The first field of each entry encodes the
 * hardware strapping value used to identify the attached flash/EEPROM
 * part; the remaining magic constants are presumably controller access
 * register values for that part — confirm against the NetXtreme II
 * NVRAM documentation before modifying any entry.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211
212 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
213 {
214         u32 diff;
215
216         smp_mb();
217
218         /* The ring uses 256 indices for 255 entries, one of them
219          * needs to be skipped.
220          */
221         diff = bp->tx_prod - bp->tx_cons;
222         if (unlikely(diff >= TX_DESC_CNT)) {
223                 diff &= 0xffff;
224                 if (diff == TX_DESC_CNT)
225                         diff = MAX_TX_DESC_CNT;
226         }
227         return (bp->tx_ring_size - diff);
228 }
229
230 static u32
231 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
232 {
233         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
234         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
235 }
236
/* Indirect register write: select the target offset through the PCICFG
 * window address register, then write the value through the window.
 * The two writes must stay in this order.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
243
244 static void
245 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
246 {
247         offset += cid_addr;
248         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
249                 int i;
250
251                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254                 for (i = 0; i < 5; i++) {
255                         u32 val;
256                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258                                 break;
259                         udelay(5);
260                 }
261         } else {
262                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263                 REG_WR(bp, BNX2_CTX_DATA, val);
264         }
265 }
266
/* Read PHY register 'reg' over the MDIO bus into *val.
 *
 * If the EMAC is auto-polling the PHY, auto-poll is turned off around
 * the manual MDIO transaction so the two cannot collide, then restored
 * afterwards.
 *
 * Returns 0 on success; -EBUSY if the transaction never completes
 * (*val is then set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* NOTE(review): read-back presumably flushes the posted
                 * write before the delay — confirm against chip docs.
                 */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the read: PHY address, register number, READ command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read and keep only the 16-bit data field. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        /* On the success path val1 was masked to the data field, which
         * cannot have START_BUSY set, so this test only fires on timeout.
         */
        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
323
/* Write 'val' to PHY register 'reg' over the MDIO bus.
 *
 * As in bnx2_read_phy(), EMAC auto-polling is suspended around the
 * manual transaction and restored afterwards.
 *
 * Returns 0 on success, -EBUSY if the transaction never completes.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the write: PHY address, register, data and WRITE command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
372
/* Mask the chip's interrupt.  The read back of INT_ACK_CMD presumably
 * flushes the posted write so the mask takes effect before we return —
 * confirm against the chip documentation.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
380
/* Unmask the chip's interrupt.  The two INT_ACK_CMD writes ack the
 * status block up to last_status_idx (first with the mask bit still
 * set, then without it); the final HC_COMMAND write with COAL_NOW
 * presumably forces the host coalescing block to fire immediately if
 * events are already pending — confirm against the chip docs.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
393
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR sees the disable request; the
 * matching decrement happens in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
401
/* Quiesce the interface: interrupts off (synchronously), then NAPI
 * polling and the tx queue.  Counterpart of bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
412
/* Undo one bnx2_netif_stop().  The interface is only restarted when
 * intr_sem drops to zero, so nested stop/start pairs balance out and
 * only the outermost start re-enables tx, polling and interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        netif_poll_enable(bp->dev);
                        bnx2_enable_int(bp);
                }
        }
}
424
425 static void
426 bnx2_free_mem(struct bnx2 *bp)
427 {
428         int i;
429
430         for (i = 0; i < bp->ctx_pages; i++) {
431                 if (bp->ctx_blk[i]) {
432                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
433                                             bp->ctx_blk[i],
434                                             bp->ctx_blk_mapping[i]);
435                         bp->ctx_blk[i] = NULL;
436                 }
437         }
438         if (bp->status_blk) {
439                 pci_free_consistent(bp->pdev, bp->status_stats_size,
440                                     bp->status_blk, bp->status_blk_mapping);
441                 bp->status_blk = NULL;
442                 bp->stats_blk = NULL;
443         }
444         if (bp->tx_desc_ring) {
445                 pci_free_consistent(bp->pdev,
446                                     sizeof(struct tx_bd) * TX_DESC_CNT,
447                                     bp->tx_desc_ring, bp->tx_desc_mapping);
448                 bp->tx_desc_ring = NULL;
449         }
450         kfree(bp->tx_buf_ring);
451         bp->tx_buf_ring = NULL;
452         for (i = 0; i < bp->rx_max_ring; i++) {
453                 if (bp->rx_desc_ring[i])
454                         pci_free_consistent(bp->pdev,
455                                             sizeof(struct rx_bd) * RX_DESC_CNT,
456                                             bp->rx_desc_ring[i],
457                                             bp->rx_desc_mapping[i]);
458                 bp->rx_desc_ring[i] = NULL;
459         }
460         vfree(bp->rx_buf_ring);
461         bp->rx_buf_ring = NULL;
462 }
463
/* Allocate all host and DMA memory for the device: tx shadow ring and
 * descriptor ring, rx shadow and descriptor rings, the combined
 * status/statistics block, and (5709 only) context memory pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* rx shadow ring can be large, so it comes from vmalloc. */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats block lives right after the cache-aligned status block. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 0x2000 bytes of context memory, in BCM_PAGE_SIZE pages. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
534
/* Encode the current link state (speed/duplex/autoneg result) into the
 * BNX2_LINK_STATUS word in shared memory so the bootcode/management
 * firmware can see it.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice on purpose — presumably to
                         * clear the latched link bit and get the current
                         * state (standard MII behavior); do not "dedupe".
                         */
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
590
591 static void
592 bnx2_report_link(struct bnx2 *bp)
593 {
594         if (bp->link_up) {
595                 netif_carrier_on(bp->dev);
596                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
597
598                 printk("%d Mbps ", bp->line_speed);
599
600                 if (bp->duplex == DUPLEX_FULL)
601                         printk("full duplex");
602                 else
603                         printk("half duplex");
604
605                 if (bp->flow_ctrl) {
606                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
607                                 printk(", receive ");
608                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
609                                         printk("& transmit ");
610                         }
611                         else {
612                                 printk(", transmit ");
613                         }
614                         printk("flow control ON");
615                 }
616                 printk("\n");
617         }
618         else {
619                 netif_carrier_off(bp->dev);
620                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
621         }
622
623         bnx2_report_fw_link(bp);
624 }
625
626 static void
627 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
628 {
629         u32 local_adv, remote_adv;
630
631         bp->flow_ctrl = 0;
632         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
633                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
634
635                 if (bp->duplex == DUPLEX_FULL) {
636                         bp->flow_ctrl = bp->req_flow_ctrl;
637                 }
638                 return;
639         }
640
641         if (bp->duplex != DUPLEX_FULL) {
642                 return;
643         }
644
645         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
646             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
647                 u32 val;
648
649                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
650                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
651                         bp->flow_ctrl |= FLOW_CTRL_TX;
652                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
653                         bp->flow_ctrl |= FLOW_CTRL_RX;
654                 return;
655         }
656
657         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
658         bnx2_read_phy(bp, MII_LPA, &remote_adv);
659
660         if (bp->phy_flags & PHY_SERDES_FLAG) {
661                 u32 new_local_adv = 0;
662                 u32 new_remote_adv = 0;
663
664                 if (local_adv & ADVERTISE_1000XPAUSE)
665                         new_local_adv |= ADVERTISE_PAUSE_CAP;
666                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
667                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
668                 if (remote_adv & ADVERTISE_1000XPAUSE)
669                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
670                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
671                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
672
673                 local_adv = new_local_adv;
674                 remote_adv = new_remote_adv;
675         }
676
677         /* See Table 28B-3 of 802.3ab-1999 spec. */
678         if (local_adv & ADVERTISE_PAUSE_CAP) {
679                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
680                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
681                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
682                         }
683                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
684                                 bp->flow_ctrl = FLOW_CTRL_RX;
685                         }
686                 }
687                 else {
688                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
689                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
690                         }
691                 }
692         }
693         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
694                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
695                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
696
697                         bp->flow_ctrl = FLOW_CTRL_TX;
698                 }
699         }
700 }
701
702 static int
703 bnx2_5708s_linkup(struct bnx2 *bp)
704 {
705         u32 val;
706
707         bp->link_up = 1;
708         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
709         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
710                 case BCM5708S_1000X_STAT1_SPEED_10:
711                         bp->line_speed = SPEED_10;
712                         break;
713                 case BCM5708S_1000X_STAT1_SPEED_100:
714                         bp->line_speed = SPEED_100;
715                         break;
716                 case BCM5708S_1000X_STAT1_SPEED_1G:
717                         bp->line_speed = SPEED_1000;
718                         break;
719                 case BCM5708S_1000X_STAT1_SPEED_2G5:
720                         bp->line_speed = SPEED_2500;
721                         break;
722         }
723         if (val & BCM5708S_1000X_STAT1_FD)
724                 bp->duplex = DUPLEX_FULL;
725         else
726                 bp->duplex = DUPLEX_HALF;
727
728         return 0;
729 }
730
731 static int
732 bnx2_5706s_linkup(struct bnx2 *bp)
733 {
734         u32 bmcr, local_adv, remote_adv, common;
735
736         bp->link_up = 1;
737         bp->line_speed = SPEED_1000;
738
739         bnx2_read_phy(bp, MII_BMCR, &bmcr);
740         if (bmcr & BMCR_FULLDPLX) {
741                 bp->duplex = DUPLEX_FULL;
742         }
743         else {
744                 bp->duplex = DUPLEX_HALF;
745         }
746
747         if (!(bmcr & BMCR_ANENABLE)) {
748                 return 0;
749         }
750
751         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
752         bnx2_read_phy(bp, MII_LPA, &remote_adv);
753
754         common = local_adv & remote_adv;
755         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
756
757                 if (common & ADVERTISE_1000XFULL) {
758                         bp->duplex = DUPLEX_FULL;
759                 }
760                 else {
761                         bp->duplex = DUPLEX_HALF;
762                 }
763         }
764
765         return 0;
766 }
767
768 static int
769 bnx2_copper_linkup(struct bnx2 *bp)
770 {
771         u32 bmcr;
772
773         bnx2_read_phy(bp, MII_BMCR, &bmcr);
774         if (bmcr & BMCR_ANENABLE) {
775                 u32 local_adv, remote_adv, common;
776
777                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
778                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
779
780                 common = local_adv & (remote_adv >> 2);
781                 if (common & ADVERTISE_1000FULL) {
782                         bp->line_speed = SPEED_1000;
783                         bp->duplex = DUPLEX_FULL;
784                 }
785                 else if (common & ADVERTISE_1000HALF) {
786                         bp->line_speed = SPEED_1000;
787                         bp->duplex = DUPLEX_HALF;
788                 }
789                 else {
790                         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
791                         bnx2_read_phy(bp, MII_LPA, &remote_adv);
792
793                         common = local_adv & remote_adv;
794                         if (common & ADVERTISE_100FULL) {
795                                 bp->line_speed = SPEED_100;
796                                 bp->duplex = DUPLEX_FULL;
797                         }
798                         else if (common & ADVERTISE_100HALF) {
799                                 bp->line_speed = SPEED_100;
800                                 bp->duplex = DUPLEX_HALF;
801                         }
802                         else if (common & ADVERTISE_10FULL) {
803                                 bp->line_speed = SPEED_10;
804                                 bp->duplex = DUPLEX_FULL;
805                         }
806                         else if (common & ADVERTISE_10HALF) {
807                                 bp->line_speed = SPEED_10;
808                                 bp->duplex = DUPLEX_HALF;
809                         }
810                         else {
811                                 bp->line_speed = 0;
812                                 bp->link_up = 0;
813                         }
814                 }
815         }
816         else {
817                 if (bmcr & BMCR_SPEED100) {
818                         bp->line_speed = SPEED_100;
819                 }
820                 else {
821                         bp->line_speed = SPEED_10;
822                 }
823                 if (bmcr & BMCR_FULLDPLX) {
824                         bp->duplex = DUPLEX_FULL;
825                 }
826                 else {
827                         bp->duplex = DUPLEX_HALF;
828                 }
829         }
830
831         return 0;
832 }
833
/* Program the EMAC to match the resolved link parameters in
 * bp->line_speed, bp->duplex and bp->flow_ctrl.  Called after the
 * link state has been (re)resolved.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX lengths register: the larger value below is needed only
	 * for 1000 Mbps half duplex (magic values from Broadcom).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Chips other than the 5706 have a
				 * dedicated 10M MII port mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: default the port mode to GMII. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
900
/* Re-evaluate the link state, typically after a link attention.
 * Updates bp->link_up, resolves speed/duplex/flow control via the
 * PHY-specific helpers, reports link transitions, and reprograms
 * the MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is considered always up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* Read BMSR twice: the link-status bit is latched, so the
	 * second read reflects the current state.
	 */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On the 5706 SerDes, trust the EMAC link status over the
	 * BMSR link bit.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down.  If SerDes autoneg is wanted but was
		 * disabled (e.g. while forcing 2.5G), re-enable it.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Report only actual link transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
967
968 static int
969 bnx2_reset_phy(struct bnx2 *bp)
970 {
971         int i;
972         u32 reg;
973
974         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
975
976 #define PHY_RESET_MAX_WAIT 100
977         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
978                 udelay(10);
979
980                 bnx2_read_phy(bp, MII_BMCR, &reg);
981                 if (!(reg & BMCR_RESET)) {
982                         udelay(20);
983                         break;
984                 }
985         }
986         if (i == PHY_RESET_MAX_WAIT) {
987                 return -EBUSY;
988         }
989         return 0;
990 }
991
992 static u32
993 bnx2_phy_get_pause_adv(struct bnx2 *bp)
994 {
995         u32 adv = 0;
996
997         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
998                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
999
1000                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001                         adv = ADVERTISE_1000XPAUSE;
1002                 }
1003                 else {
1004                         adv = ADVERTISE_PAUSE_CAP;
1005                 }
1006         }
1007         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1008                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009                         adv = ADVERTISE_1000XPSE_ASYM;
1010                 }
1011                 else {
1012                         adv = ADVERTISE_PAUSE_ASYM;
1013                 }
1014         }
1015         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1016                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1017                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1018                 }
1019                 else {
1020                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1021                 }
1022         }
1023         return adv;
1024 }
1025
/* Configure the SerDes PHY according to bp->autoneg and the
 * requested settings.  Handles forced speed/duplex (including
 * forced 2.5G on the 5708) as well as autonegotiation.  Called with
 * phy_lock held (it drops and re-takes the lock around msleep).
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			/* Enabling 2.5G capability in UP1 requires a
			 * link bounce to take effect.
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G: make sure the 2.5G
			 * capability bit is cleared.
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiation path. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if the advertisement changed or
	 * autoneg is currently disabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Drop phy_lock while sleeping. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1129
/* Ethtool advertisement mask for fibre: only 1000 full duplex. */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
	(ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask for every copper speed/duplex the
 * driver supports (no 1000 half duplex).
 */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
	ADVERTISED_1000baseT_Full)

/* MII autoneg advertisement bits for all 10/100 modes plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register advertisement bits. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1142
/* Configure the copper PHY: either (re)start autonegotiation with
 * the advertisement derived from bp->advertising plus the requested
 * pause bits, or force the requested speed/duplex.  Called with
 * phy_lock held (it drops and re-takes the lock around msleep).
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked to the bits we manage. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed
		 * or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1236
1237 static int
1238 bnx2_setup_phy(struct bnx2 *bp)
1239 {
1240         if (bp->loopback == MAC_LOOPBACK)
1241                 return 0;
1242
1243         if (bp->phy_flags & PHY_SERDES_FLAG) {
1244                 return (bnx2_setup_serdes_phy(bp));
1245         }
1246         else {
1247                 return (bnx2_setup_copper_phy(bp));
1248         }
1249 }
1250
/* One-time init of the 5708 SerDes PHY: select the IEEE register
 * layout, enable fiber mode with auto-detect and parallel
 * detection, optionally enable 2.5G, and apply revision/board
 * specific TX tuning.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Select the standard IEEE register layout via the DIGITAL 3
	 * block, then return to the DIGITAL block.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with signal auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Enable 2.5G capability when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from the shared
	 * memory config, but only on backplane (blade) designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1304
/* One-time init of the 5706 SerDes PHY.  Programs the extended
 * packet length setting according to the current MTU.  The 0x18 and
 * 0x1c accesses are Broadcom shadow registers; the values are magic
 * numbers supplied by the vendor.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		/* Vendor-specified GP HW control value for the 5706. */
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1339
/* One-time init of the copper PHY: apply the CRC and early-DAC
 * workarounds when flagged, program the extended packet length bit
 * according to the MTU, and enable ethernet@wirespeed.  Registers
 * 0x10/0x15/0x17/0x18 are Broadcom shadow/expansion registers with
 * vendor-specified magic values.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* CRC workaround: vendor-specified expansion register writes. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC: clear bit 8 of DSP expand register 8. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1388
1389
/* Reset and initialize the PHY: enable link attentions, read the
 * PHY id, run the chip-specific init, then apply the current link
 * settings.  Returns the result of the chip-specific init step.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	/* Select the link-ready PHY interrupt mode (see the
	 * PHY_INT_MODE flags).
	 */
	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY id from the two MII ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1422
1423 static int
1424 bnx2_set_mac_loopback(struct bnx2 *bp)
1425 {
1426         u32 mac_mode;
1427
1428         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1429         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1430         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1431         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1432         bp->link_up = 1;
1433         return 0;
1434 }
1435
1436 static int bnx2_test_link(struct bnx2 *);
1437
/* Put the PHY in internal loopback at forced 1000 Mbps full duplex,
 * wait up to ~1 second for the link to settle, and program the EMAC
 * for GMII.  Returns 0 on success, or the error from the BMCR
 * write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for the link to come up (0 is taken as link up). */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear loopback/force bits in the EMAC and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1467
/* Post a message to the bootcode mailbox in shared memory and wait
 * for an acknowledgement.  A rolling sequence number is folded into
 * the message so the ack can be matched.  Returns 0 on success
 * (unconditionally for WAIT0 messages), -EBUSY on ack timeout (the
 * firmware is also informed of the timeout), or -EIO if the
 * firmware reports a bad status.  @silent suppresses the timeout
 * message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require a completed handshake. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1510
/* Program the 5709 context engine's host page table with the DMA
 * addresses of the context blocks and wait for each page-table
 * write request to be consumed.  Returns 0 on success, -EBUSY if a
 * write request does not clear in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine.  Bit 12 and the page-size
	 * encoding (relative to 256 bytes) are vendor-specified.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low and high halves of the page's DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the hardware to consume the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1544
/* Zero the on-chip context memory for all 96 contexts (the 5709
 * uses host memory instead; see bnx2_init_5709_context).  On the
 * 5706 A0, some VCIDs are remapped to different physical CIDs to
 * work around an addressing quirk.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* VCIDs with bit 3 set get remapped. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1585
/* Work around bad on-chip RX buffer memory blocks: allocate mbufs
 * from the chip until the free pool is exhausted, remember the good
 * ones (bit 9 clear in the returned value), and free only those
 * back — leaving the bad blocks permanently allocated and out of
 * circulation.  Returns 0 on success or -ENOMEM if the temporary
 * array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Room for up to 512 good mbuf values. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the mbuf value into the format expected by
		 * the free register (vendor-specified encoding).
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1636
1637 static void
1638 bnx2_set_mac_addr(struct bnx2 *bp)
1639 {
1640         u32 val;
1641         u8 *mac_addr = bp->dev->dev_addr;
1642
1643         val = (mac_addr[0] << 8) | mac_addr[1];
1644
1645         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1646
1647         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1648                 (mac_addr[4] << 8) | mac_addr[5];
1649
1650         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1651 }
1652
1653 static inline int
1654 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1655 {
1656         struct sk_buff *skb;
1657         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1658         dma_addr_t mapping;
1659         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1660         unsigned long align;
1661
1662         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1663         if (skb == NULL) {
1664                 return -ENOMEM;
1665         }
1666
1667         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1668                 skb_reserve(skb, BNX2_RX_ALIGN - align);
1669
1670         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1671                 PCI_DMA_FROMDEVICE);
1672
1673         rx_buf->skb = skb;
1674         pci_unmap_addr_set(rx_buf, mapping, mapping);
1675
1676         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1677         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1678
1679         bp->rx_prod_bseq += bp->rx_buf_use_size;
1680
1681         return 0;
1682 }
1683
/* Handle a link-state attention from the status block: if the
 * current attention bit differs from the last-acknowledged copy,
 * acknowledge it through the PCICFG set/clear command registers and
 * re-resolve the link.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	/* attn_bits is the current state; attn_bits_ack is what was
	 * last acknowledged.
	 */
	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
1705
/* Reclaim TX buffers that the hardware has finished transmitting and
 * wake the TX queue if it was stopped for lack of descriptors.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_free_bd = 0;

        /* The consumer index never reports the last ring entry; when it
         * lands there, advance past it.
         */
        hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
        if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &bp->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        /* Compute the index just past this packet's last
                         * BD; if the hardware has not consumed that far
                         * yet, defer the whole packet to a later pass.
                         */
                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Signed 16-bit compare handles index wraparound. */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                /* Unmap the linear part of the skb... */
                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* ...and each paged fragment, one BD per fragment. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                tx_free_bd += last + 1;

                dev_kfree_skb(skb);

                /* Re-read the consumer index in case more packets
                 * completed while we were freeing this one.
                 */
                hw_cons = bp->hw_tx_cons =
                        sblk->status_tx_quick_consumer_index0;

                if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                        hw_cons++;
                }
        }

        bp->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Re-check under netif_tx_lock to close the race with
         * bnx2_start_xmit() stopping the queue concurrently.
         */
        if (unlikely(netif_queue_stopped(bp->dev)) &&
                     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
                netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
}
1793
1794 static inline void
1795 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1796         u16 cons, u16 prod)
1797 {
1798         struct sw_bd *cons_rx_buf, *prod_rx_buf;
1799         struct rx_bd *cons_bd, *prod_bd;
1800
1801         cons_rx_buf = &bp->rx_buf_ring[cons];
1802         prod_rx_buf = &bp->rx_buf_ring[prod];
1803
1804         pci_dma_sync_single_for_device(bp->pdev,
1805                 pci_unmap_addr(cons_rx_buf, mapping),
1806                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1807
1808         bp->rx_prod_bseq += bp->rx_buf_use_size;
1809
1810         prod_rx_buf->skb = skb;
1811
1812         if (cons == prod)
1813                 return;
1814
1815         pci_unmap_addr_set(prod_rx_buf, mapping,
1816                         pci_unmap_addr(cons_rx_buf, mapping));
1817
1818         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1819         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1820         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1821         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1822 }
1823
/* Process received packets, up to 'budget' of them.  Returns the
 * number of packets passed up the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0;

        /* The consumer index never reports the last ring entry; skip
         * over it when it comes up.
         */
        hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->rx_cons;
        sw_prod = bp->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Sync only the frame header area for now; the rest is
                 * unmapped or synced back below depending on the path.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

                /* The chip prepends an l2_fhdr with status and length;
                 * the reported length includes the 4-byte CRC.
                 */
                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len - 4;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        goto reuse_rx;
                }

                /* Since we don't have a jumbo ring, copy small packets
                 * if mtu > 1500
                 */
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)
                                goto reuse_rx;

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        /* Recycle the original buffer back into the ring. */
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                }
                else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
                        /* Replacement buffer allocated; pass this one up. */
                        pci_unmap_single(bp->pdev, dma_addr,
                                bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                        skb_reserve(skb, bp->rx_offset);
                        skb_put(skb, len);
                }
                else {
reuse_rx:
                        /* Error frame or allocation failure: recycle the
                         * buffer and drop the packet.
                         */
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);
                        goto next_rx;
                }

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless VLAN tagged (0x8100). */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Report hardware checksum status when RX checksum
                 * offload is enabled and no checksum error was flagged.
                 */
                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bp->hw_rx_cons =
                                sblk->status_rx_quick_consumer_index0;
                        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
                                hw_cons++;
                        rmb();
                }
        }
        bp->rx_cons = sw_cons;
        bp->rx_prod = sw_prod;

        /* Tell the chip the new producer index and byte sequence. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
1971
1972 /* MSI ISR - The only difference between this and the INTx ISR
1973  * is that the MSI interrupt is always serviced.
1974  */
1975 static irqreturn_t
1976 bnx2_msi(int irq, void *dev_instance)
1977 {
1978         struct net_device *dev = dev_instance;
1979         struct bnx2 *bp = netdev_priv(dev);
1980
1981         prefetch(bp->status_blk);
1982         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1983                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1984                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1985
1986         /* Return here if interrupt is disabled. */
1987         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1988                 return IRQ_HANDLED;
1989
1990         netif_rx_schedule(dev);
1991
1992         return IRQ_HANDLED;
1993 }
1994
1995 static irqreturn_t
1996 bnx2_interrupt(int irq, void *dev_instance)
1997 {
1998         struct net_device *dev = dev_instance;
1999         struct bnx2 *bp = netdev_priv(dev);
2000
2001         /* When using INTx, it is possible for the interrupt to arrive
2002          * at the CPU before the status block posted prior to the
2003          * interrupt. Reading a register will flush the status block.
2004          * When using MSI, the MSI message will always complete after
2005          * the status block write.
2006          */
2007         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2008             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2009              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2010                 return IRQ_NONE;
2011
2012         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2013                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2014                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2015
2016         /* Return here if interrupt is shared and is disabled. */
2017         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2018                 return IRQ_HANDLED;
2019
2020         netif_rx_schedule(dev);
2021
2022         return IRQ_HANDLED;
2023 }
2024
2025 static inline int
2026 bnx2_has_work(struct bnx2 *bp)
2027 {
2028         struct status_block *sblk = bp->status_blk;
2029
2030         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2031             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2032                 return 1;
2033
2034         if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2035             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
2036                 return 1;
2037
2038         return 0;
2039 }
2040
/* NAPI poll handler: service link attentions, TX completions and RX
 * work within the given budget.  Returns 1 if more work remains,
 * 0 when polling is complete and interrupts have been re-enabled.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Attention bits differing from their ack copies indicate a
         * link state change that still needs handling.
         */
        if ((bp->status_blk->status_attn_bits &
                STATUS_ATTN_BITS_LINK_STATE) !=
                (bp->status_blk->status_attn_bits_ack &
                STATUS_ATTN_BITS_LINK_STATE)) {

                spin_lock(&bp->phy_lock);
                bnx2_phy_int(bp);
                spin_unlock(&bp->phy_lock);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
                bnx2_tx_int(bp);

        if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
                int orig_budget = *budget;
                int work_done;

                /* Honor both the global budget and this device's quota. */
                if (orig_budget > dev->quota)
                        orig_budget = dev->quota;

                work_done = bnx2_rx_int(bp, orig_budget);
                *budget -= work_done;
                dev->quota -= work_done;
        }

        /* Record the status index we have processed up to, before
         * checking for remaining work.
         */
        bp->last_status_idx = bp->status_blk->status_idx;
        rmb();

        if (!bnx2_has_work(bp)) {
                netif_rx_complete(dev);
                if (likely(bp->flags & USING_MSI_FLAG)) {
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bp->last_status_idx);
                        return 0;
                }
                /* INTx: first write acks with interrupts still masked
                 * (MASK_INT set), the second write unmasks them.
                 */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bp->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bp->last_status_idx);
                return 0;
        }

        return 1;
}
2102
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Rebuild the RX filtering state (promiscuous / all-multicast /
 * multicast hash / VLAN tag keeping) from the net_device flags and
 * multicast list, then program the EMAC and RPM sort registers.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with the bits we manage cleared. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in received frames when no VLAN group is
         * registered and ASF is not enabled.
         */
        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash filter bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Hash on the low byte of the little-endian CRC:
                         * bits 7:5 select the register, bits 4:0 the bit.
                         */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Only touch the EMAC mode register when something changed. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, reprogram, then re-enable the sort rules. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
2177
2178 #define FW_BUF_SIZE     0x8000
2179
2180 static int
2181 bnx2_gunzip_init(struct bnx2 *bp)
2182 {
2183         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2184                 goto gunzip_nomem1;
2185
2186         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2187                 goto gunzip_nomem2;
2188
2189         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2190         if (bp->strm->workspace == NULL)
2191                 goto gunzip_nomem3;
2192
2193         return 0;
2194
2195 gunzip_nomem3:
2196         kfree(bp->strm);
2197         bp->strm = NULL;
2198
2199 gunzip_nomem2:
2200         vfree(bp->gunzip_buf);
2201         bp->gunzip_buf = NULL;
2202
2203 gunzip_nomem1:
2204         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2205                             "uncompression.\n", bp->dev->name);
2206         return -ENOMEM;
2207 }
2208
2209 static void
2210 bnx2_gunzip_end(struct bnx2 *bp)
2211 {
2212         kfree(bp->strm->workspace);
2213
2214         kfree(bp->strm);
2215         bp->strm = NULL;
2216
2217         if (bp->gunzip_buf) {
2218                 vfree(bp->gunzip_buf);
2219                 bp->gunzip_buf = NULL;
2220         }
2221 }
2222
/* Decompress the gzip-wrapped firmware image at zbuf (len bytes) into
 * bp->gunzip_buf.  On return, *outbuf and *outlen describe the
 * decompressed data.  Returns 0 on success, -EINVAL for a bad gzip
 * header, or a zlib error code.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
        int n, rc;

        /* check gzip header */
        if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
                return -EINVAL;

        /* Fixed part of the gzip header is 10 bytes. */
        n = 10;

#define FNAME   0x8
        /* Skip the NUL-terminated original file name, if present. */
        if (zbuf[3] & FNAME)
                while ((zbuf[n++] != 0) && (n < len));

        bp->strm->next_in = zbuf + n;
        bp->strm->avail_in = len - n;
        bp->strm->next_out = bp->gunzip_buf;
        bp->strm->avail_out = FW_BUF_SIZE;

        /* Negative window bits: raw deflate data, no zlib header. */
        rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
        if (rc != Z_OK)
                return rc;

        rc = zlib_inflate(bp->strm, Z_FINISH);

        *outlen = FW_BUF_SIZE - bp->strm->avail_out;
        *outbuf = bp->gunzip_buf;

        if ((rc != Z_OK) && (rc != Z_STREAM_END))
                printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
                       bp->dev->name, bp->strm->msg);

        zlib_inflateEnd(bp->strm);

        /* Z_STREAM_END means the full stream was consumed. */
        if (rc == Z_STREAM_END)
                return 0;

        return rc;
}
2263
2264 static void
2265 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2266         u32 rv2p_proc)
2267 {
2268         int i;
2269         u32 val;
2270
2271
2272         for (i = 0; i < rv2p_code_len; i += 8) {
2273                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2274                 rv2p_code++;
2275                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2276                 rv2p_code++;
2277
2278                 if (rv2p_proc == RV2P_PROC1) {
2279                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2280                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2281                 }
2282                 else {
2283                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2284                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2285                 }
2286         }
2287
2288         /* Reset the processor, un-stall is done later. */
2289         if (rv2p_proc == RV2P_PROC1) {
2290                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2291         }
2292         else {
2293                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2294         }
2295 }
2296
2297 static int
2298 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2299 {
2300         u32 offset;
2301         u32 val;
2302         int rc;
2303
2304         /* Halt the CPU. */
2305         val = REG_RD_IND(bp, cpu_reg->mode);
2306         val |= cpu_reg->mode_value_halt;
2307         REG_WR_IND(bp, cpu_reg->mode, val);
2308         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2309
2310         /* Load the Text area. */
2311         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2312         if (fw->gz_text) {
2313                 u32 text_len;
2314                 void *text;
2315
2316                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2317                                  &text_len);
2318                 if (rc)
2319                         return rc;
2320
2321                 fw->text = text;
2322         }
2323         if (fw->gz_text) {
2324                 int j;
2325
2326                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2327                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2328                 }
2329         }
2330
2331         /* Load the Data area. */
2332         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2333         if (fw->data) {
2334                 int j;
2335
2336                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2337                         REG_WR_IND(bp, offset, fw->data[j]);
2338                 }
2339         }
2340
2341         /* Load the SBSS area. */
2342         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2343         if (fw->sbss) {
2344                 int j;
2345
2346                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2347                         REG_WR_IND(bp, offset, fw->sbss[j]);
2348                 }
2349         }
2350
2351         /* Load the BSS area. */
2352         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2353         if (fw->bss) {
2354                 int j;
2355
2356                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2357                         REG_WR_IND(bp, offset, fw->bss[j]);
2358                 }
2359         }
2360
2361         /* Load the Read-Only area. */
2362         offset = cpu_reg->spad_base +
2363                 (fw->rodata_addr - cpu_reg->mips_view_base);
2364         if (fw->rodata) {
2365                 int j;
2366
2367                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2368                         REG_WR_IND(bp, offset, fw->rodata[j]);
2369                 }
2370         }
2371
2372         /* Clear the pre-fetch instruction. */
2373         REG_WR_IND(bp, cpu_reg->inst, 0);
2374         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2375
2376         /* Start the CPU. */
2377         val = REG_RD_IND(bp, cpu_reg->mode);
2378         val &= ~cpu_reg->mode_value_halt;
2379         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2380         REG_WR_IND(bp, cpu_reg->mode, val);
2381
2382         return 0;
2383 }
2384
/* Load firmware into all on-chip processors: the two RV2P engines,
 * then RXP, TXP, TPAT, COM, and (5709 only) CP.  A decompression
 * context is set up first and torn down before returning.  Returns 0
 * on success or a negative error code.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        struct cpu_reg cpu_reg;
        struct fw_info *fw;
        int rc = 0;
        void *text;
        u32 text_len;

        if ((rc = bnx2_gunzip_init(bp)) != 0)
                return rc;

        /* Initialize the RV2P processor. */
        rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
                         &text_len);
        if (rc)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

        rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
                         &text_len);
        if (rc)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

        /* Initialize the RX Processor. */
        cpu_reg.mode = BNX2_RXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_RXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_RXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        /* 5709 and later chips use a different firmware image. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_rxp_fw_09;
        else
                fw = &bnx2_rxp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        cpu_reg.mode = BNX2_TXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_txp_fw_09;
        else
                fw = &bnx2_txp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        cpu_reg.mode = BNX2_TPAT_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TPAT_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_tpat_fw_09;
        else
                fw = &bnx2_tpat_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        cpu_reg.mode = BNX2_COM_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_COM_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_COM_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_com_fw_09;
        else
                fw = &bnx2_com_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor (present on 5709 only). */
        cpu_reg.mode = BNX2_CP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_CP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_CP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                fw = &bnx2_cp_fw_09;

                rc = load_cpu_fw(bp, &cpu_reg, fw);
                if (rc)
                        goto init_cpu_err;
        }
init_cpu_err:
        /* Tear down the decompression context on both paths. */
        bnx2_gunzip_end(bp);
        return rc;
}
2529
/* bnx2_set_power_state - move the device between PCI power states.
 * @bp:    driver context
 * @state: PCI_D0 (full power) or PCI_D3hot (suspend); any other state
 *         returns -EINVAL.
 *
 * D0: clears PME status, waits out the mandatory D3hot->D0 transition
 * delay, and disables the magic-packet/ACPI wake-up logic in the EMAC
 * and RPM blocks.
 *
 * D3hot: when WOL is enabled, temporarily forces the copper PHY to
 * 10/100 autoneg, programs the MAC to receive magic packets (all
 * multicast + sort mode), enables the EMAC/RPM blocks needed for
 * wake-up, notifies firmware, then writes PMCSR to enter D3hot.  No
 * register access is allowed after the PMCSR write until the device is
 * brought back to D0.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Go to D0 and clear any pending PME status (write-1-to-clear). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear latched wake-packet status and turn magic-packet
		 * mode off in the MAC ... */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* ... and disable ACPI pattern matching in the rx parser. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save user link settings; the WOL link only needs
			 * 10/100, so force autoneg to those speeds. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			/* Restore the user-configured settings. */
			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort user 0: clear, write value, then set
			 * the enable bit (three-step write sequence). */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell firmware we are suspending, unless this board has
		 * no WOL support at all. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1 only enter D3hot (pmcsr |= 3) when WOL
			 * is enabled; otherwise the power state is left as-is. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2656
2657 static int
2658 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2659 {
2660         u32 val;
2661         int j;
2662
2663         /* Request access to the flash interface. */
2664         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2665         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2666                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2667                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2668                         break;
2669
2670                 udelay(5);
2671         }
2672
2673         if (j >= NVRAM_TIMEOUT_COUNT)
2674                 return -EBUSY;
2675
2676         return 0;
2677 }
2678
2679 static int
2680 bnx2_release_nvram_lock(struct bnx2 *bp)
2681 {
2682         int j;
2683         u32 val;
2684
2685         /* Relinquish nvram interface. */
2686         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2687
2688         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2689                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2690                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2691                         break;
2692
2693                 udelay(5);
2694         }
2695
2696         if (j >= NVRAM_TIMEOUT_COUNT)
2697                 return -EBUSY;
2698
2699         return 0;
2700 }
2701
2702
2703 static int
2704 bnx2_enable_nvram_write(struct bnx2 *bp)
2705 {
2706         u32 val;
2707
2708         val = REG_RD(bp, BNX2_MISC_CFG);
2709         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2710
2711         if (!bp->flash_info->buffered) {
2712                 int j;
2713
2714                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2715                 REG_WR(bp, BNX2_NVM_COMMAND,
2716                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2717
2718                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2719                         udelay(5);
2720
2721                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2722                         if (val & BNX2_NVM_COMMAND_DONE)
2723                                 break;
2724                 }
2725
2726                 if (j >= NVRAM_TIMEOUT_COUNT)
2727                         return -EBUSY;
2728         }
2729         return 0;
2730 }
2731
2732 static void
2733 bnx2_disable_nvram_write(struct bnx2 *bp)
2734 {
2735         u32 val;
2736
2737         val = REG_RD(bp, BNX2_MISC_CFG);
2738         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2739 }
2740
2741
2742 static void
2743 bnx2_enable_nvram_access(struct bnx2 *bp)
2744 {
2745         u32 val;
2746
2747         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2748         /* Enable both bits, even on read. */
2749         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2750                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2751 }
2752
2753 static void
2754 bnx2_disable_nvram_access(struct bnx2 *bp)
2755 {
2756         u32 val;
2757
2758         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2759         /* Disable both bits, even after read. */
2760         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2761                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2762                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2763 }
2764
2765 static int
2766 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2767 {
2768         u32 cmd;
2769         int j;
2770
2771         if (bp->flash_info->buffered)
2772                 /* Buffered flash, no erase needed */
2773                 return 0;
2774
2775         /* Build an erase command */
2776         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2777               BNX2_NVM_COMMAND_DOIT;
2778
2779         /* Need to clear DONE bit separately. */
2780         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2781
2782         /* Address of the NVRAM to read from. */
2783         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2784
2785         /* Issue an erase command. */
2786         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2787
2788         /* Wait for completion. */
2789         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2790                 u32 val;
2791
2792                 udelay(5);
2793
2794                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2795                 if (val & BNX2_NVM_COMMAND_DONE)
2796                         break;
2797         }
2798
2799         if (j >= NVRAM_TIMEOUT_COUNT)
2800                 return -EBUSY;
2801
2802         return 0;
2803 }
2804
/* bnx2_nvram_read_dword - read one 32-bit word from NVRAM.
 * @bp:        driver context
 * @offset:    linear byte offset within the flash; translated to a
 *             page/offset address for buffered flash parts
 * @ret_val:   destination for the 4 bytes read
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing bits from the caller
 *
 * Returns 0 on success, -EBUSY if the DONE bit never sets within the
 * poll budget.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* NVM_READ data is big-endian; swap so the bytes
			 * land in ret_val in flash byte order. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2850
2851
/* bnx2_nvram_write_dword - write one 32-bit word to NVRAM.
 * @bp:        driver context
 * @offset:    linear byte offset within the flash; translated to a
 *             page/offset address for buffered flash parts
 * @val:       the 4 bytes to write, in flash byte order
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing bits from the caller
 *
 * The caller is expected to have unlocked writes via
 * bnx2_enable_nvram_write().  Returns 0 on success, -EBUSY if the DONE
 * bit never sets within the poll budget.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* NVM_WRITE expects big-endian data. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2895
/* bnx2_init_nvram - identify the attached flash/EEPROM part.
 *
 * Reads NVM_CFG1 and matches its strapping bits against flash_table[].
 * If the interface has not yet been reconfigured (bit 30 clear), the
 * matching entry's config registers are programmed into the chip under
 * the NVRAM lock.  On success bp->flash_info and bp->flash_size are
 * set.  Returns 0 on success, -ENODEV for an unrecognized part, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping field. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above fall through with j == entry_count when no
	 * table entry matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size reported by shared firmware config;
	 * fall back to the table's total_size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2973
2974 static int
2975 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2976                 int buf_size)
2977 {
2978         int rc = 0;
2979         u32 cmd_flags, offset32, len32, extra;
2980
2981         if (buf_size == 0)
2982                 return 0;
2983
2984         /* Request access to the flash interface. */
2985         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2986                 return rc;
2987
2988         /* Enable access to flash interface */
2989         bnx2_enable_nvram_access(bp);
2990
2991         len32 = buf_size;
2992         offset32 = offset;
2993         extra = 0;
2994
2995         cmd_flags = 0;
2996
2997         if (offset32 & 3) {
2998                 u8 buf[4];
2999                 u32 pre_len;
3000
3001                 offset32 &= ~3;
3002                 pre_len = 4 - (offset & 3);
3003
3004                 if (pre_len >= len32) {
3005                         pre_len = len32;
3006                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3007                                     BNX2_NVM_COMMAND_LAST;
3008                 }
3009                 else {
3010                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3011                 }
3012
3013                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3014
3015                 if (rc)
3016                         return rc;
3017
3018                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3019
3020                 offset32 += 4;
3021                 ret_buf += pre_len;
3022                 len32 -= pre_len;
3023         }
3024         if (len32 & 3) {
3025                 extra = 4 - (len32 & 3);
3026                 len32 = (len32 + 4) & ~3;
3027         }
3028
3029         if (len32 == 4) {
3030                 u8 buf[4];
3031
3032                 if (cmd_flags)
3033                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3034                 else
3035                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3036                                     BNX2_NVM_COMMAND_LAST;
3037
3038                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3039
3040                 memcpy(ret_buf, buf, 4 - extra);
3041         }
3042         else if (len32 > 0) {
3043                 u8 buf[4];
3044
3045                 /* Read the first word. */
3046                 if (cmd_flags)
3047                         cmd_flags = 0;
3048                 else
3049                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3050
3051                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3052
3053                 /* Advance to the next dword. */
3054                 offset32 += 4;
3055                 ret_buf += 4;
3056                 len32 -= 4;
3057
3058                 while (len32 > 4 && rc == 0) {
3059                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3060
3061                         /* Advance to the next dword. */
3062                         offset32 += 4;
3063                         ret_buf += 4;
3064                         len32 -= 4;
3065                 }
3066
3067                 if (rc)
3068                         return rc;
3069
3070                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3071                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3072
3073                 memcpy(ret_buf, buf, 4 - extra);
3074         }
3075
3076         /* Disable access to flash interface */
3077         bnx2_disable_nvram_access(bp);
3078
3079         bnx2_release_nvram_lock(bp);
3080
3081         return rc;
3082 }
3083
/* bnx2_nvram_write - write @buf_size bytes to NVRAM at @offset.
 * @bp:       driver context
 * @offset:   starting byte offset (may be unaligned)
 * @data_buf: bytes to write
 * @buf_size: number of bytes to write
 *
 * Unaligned start/end are handled by pre-reading the surrounding dwords
 * (start[]/end[]) and merging them with the caller's data in a kmalloc'd
 * align_buf.  For non-buffered flash, each affected page is read into
 * flash_buffer, erased, and rewritten with old + new data.  The NVRAM
 * lock is acquired and released around each page.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Leading misalignment: round down and pre-read the first dword. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Trailing misalignment: round up and pre-read the last dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge the pre-read edge bytes with the caller's data so the
	 * write below is fully dword-aligned. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized scratch buffer for
	 * read-modify-write.  264 bytes — presumably the largest
	 * page_size in flash_table; confirm against bnx2.h. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* One iteration per flash page touched by the write. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			/* NOTE(review): return value ignored here, unlike the
			 * checked call above — verify this is intentional. */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both buffers are safe to free here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3263
3264 static int
3265 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3266 {
3267         u32 val;
3268         int i, rc = 0;
3269
3270         /* Wait for the current PCI transaction to complete before
3271          * issuing a reset. */
3272         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3273                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3274                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3275                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3276                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3277         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3278         udelay(5);
3279
3280         /* Wait for the firmware to tell us it is ok to issue a reset. */
3281         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3282
3283         /* Deposit a driver reset signature so the firmware knows that
3284          * this is a soft reset. */
3285         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3286                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3287
3288         /* Do a dummy read to force the chip to complete all current transaction
3289          * before we issue a reset. */
3290         val = REG_RD(bp, BNX2_MISC_ID);
3291
3292         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3293                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3294                 REG_RD(bp, BNX2_MISC_COMMAND);
3295                 udelay(5);
3296
3297                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3298                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3299
3300                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3301
3302         } else {
3303                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3304                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3305                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3306
3307                 /* Chip reset. */
3308                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3309
3310                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3311                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3312                         current->state = TASK_UNINTERRUPTIBLE;
3313                         schedule_timeout(HZ / 50);
3314                 }
3315
3316                 /* Reset takes approximate 30 usec */
3317                 for (i = 0; i < 10; i++) {
3318                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3319                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3320                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3321                                 break;
3322                         udelay(10);
3323                 }
3324
3325                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3326                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3327                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3328                         return -EBUSY;
3329                 }
3330         }
3331
3332         /* Make sure byte swapping is properly configured. */
3333         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3334         if (val != 0x01020304) {
3335                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3336                 return -ENODEV;
3337         }
3338
3339         /* Wait for the firmware to finish its initialization. */
3340         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3341         if (rc)
3342                 return rc;
3343
3344         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3345                 /* Adjust the voltage regular to two steps lower.  The default
3346                  * of this register is 0x0000000e. */
3347                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3348
3349                 /* Remove bad rbuf memory from the free pool. */
3350                 rc = bnx2_alloc_bad_rbuf(bp);
3351         }
3352
3353         return rc;
3354 }
3355
/* Bring the chip from post-reset state to fully operational: program DMA
 * and host-coalescing parameters, load the on-chip CPU firmware, set the
 * MAC address and MTU, and hand the status/statistics block addresses to
 * the hardware.  Returns 0 on success or a negative errno from firmware
 * load / handshake.  The register write ordering below is significant. */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine configuration: byte/word swapping plus the number of
	 * read/write DMA channels in bits 12-15 and 16-19. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* NOTE(review): bit 23 appears to be a PCI-X 133MHz tuning knob;
	 * confirm against the chip manual. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 silicon: restrict TDMA to a single outstanding DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable Relaxed Ordering bit in the command
	 * register so completions are not reordered around the status block. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox queue: 256-byte kernel bypass blocks; 5709 A0/A1 need the
	 * halt-disable workaround. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P and TBDR the host page size (log2, biased by 8). */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks (low/high). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds: high 16 bits are the values used
	 * while an interrupt is pending, low 16 bits the normal values. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer-mode host coalescing. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether management firmware (ASF) is active. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Final handshake with the bootcode before enabling the blocks. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for later COAL_NOW pokes. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3523
3524 static void
3525 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3526 {
3527         u32 val, offset0, offset1, offset2, offset3;
3528
3529         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3530                 offset0 = BNX2_L2CTX_TYPE_XI;
3531                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3532                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3533                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3534         } else {
3535                 offset0 = BNX2_L2CTX_TYPE;
3536                 offset1 = BNX2_L2CTX_CMD_TYPE;
3537                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3538                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3539         }
3540         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3541         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3542
3543         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3544         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3545
3546         val = (u64) bp->tx_desc_mapping >> 32;
3547         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3548
3549         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3550         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3551 }
3552
3553 static void
3554 bnx2_init_tx_ring(struct bnx2 *bp)
3555 {
3556         struct tx_bd *txbd;
3557         u32 cid;
3558
3559         bp->tx_wake_thresh = bp->tx_ring_size / 2;
3560
3561         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3562
3563         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3564         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3565
3566         bp->tx_prod = 0;
3567         bp->tx_cons = 0;
3568         bp->hw_tx_cons = 0;
3569         bp->tx_prod_bseq = 0;
3570
3571         cid = TX_CID;
3572         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3573         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3574
3575         bnx2_init_tx_context(bp, cid);
3576 }
3577
/* Initialize the RX BD pages, chain them into a ring, program the RX
 * context, pre-fill the ring with receive skbs, and ring the doorbell
 * with the initial producer index and byte sequence. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Fill every BD on every page, then use each page's final BD as a
	 * chain pointer to the next page (the last page points back to
	 * page 0, closing the ring). */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		/* rxbd now points at the chain BD left after the loop. */
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX L2 context: BD-chain type and ring base address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate receive buffers; stop early (best effort) if
	 * allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3637
3638 static void
3639 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3640 {
3641         u32 num_rings, max;
3642
3643         bp->rx_ring_size = size;
3644         num_rings = 1;
3645         while (size > MAX_RX_DESC_CNT) {
3646                 size -= MAX_RX_DESC_CNT;
3647                 num_rings++;
3648         }
3649         /* round to next power of 2 */
3650         max = MAX_RX_RINGS;
3651         while ((max & num_rings) == 0)
3652                 max >>= 1;
3653
3654         if (num_rings != max)
3655                 max <<= 1;
3656
3657         bp->rx_max_ring = max;
3658         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3659 }
3660
/* Unmap and free every skb still held on the TX ring.  Called after a
 * chip reset (see bnx2_reset_nic), so TX completion is not running
 * concurrently.  Each packet occupies one head BD plus one BD per
 * fragment, which is why the index advances by a variable amount. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot: advance by one. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear part of the packet. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from the BDs that follow. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip the head BD and all fragment BDs just freed. */
		i += j + 1;
	}

}
3697
3698 static void
3699 bnx2_free_rx_skbs(struct bnx2 *bp)
3700 {
3701         int i;
3702
3703         if (bp->rx_buf_ring == NULL)
3704                 return;
3705
3706         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3707                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3708                 struct sk_buff *skb = rx_buf->skb;
3709
3710                 if (skb == NULL)
3711                         continue;
3712
3713                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3714                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3715
3716                 rx_buf->skb = NULL;
3717
3718                 dev_kfree_skb(skb);
3719         }
3720 }
3721
/* Release all skbs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3728
3729 static int
3730 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3731 {
3732         int rc;
3733
3734         rc = bnx2_reset_chip(bp, reset_code);
3735         bnx2_free_skbs(bp);
3736         if (rc)
3737                 return rc;
3738
3739         if ((rc = bnx2_init_chip(bp)) != 0)
3740                 return rc;
3741
3742         bnx2_init_tx_ring(bp);
3743         bnx2_init_rx_ring(bp);
3744         return 0;
3745 }
3746
3747 static int
3748 bnx2_init_nic(struct bnx2 *bp)
3749 {
3750         int rc;
3751
3752         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3753                 return rc;
3754
3755         spin_lock_bh(&bp->phy_lock);
3756         bnx2_init_phy(bp);
3757         spin_unlock_bh(&bp->phy_lock);
3758         bnx2_set_link(bp);
3759         return 0;
3760 }
3761
/* Ethtool register self-test: for each register in the table, write 0 and
 * all-ones, and verify that read/write bits latch (rw_mask) while
 * read-only bits are unaffected (ro_mask).  The original register value
 * is restored afterwards, including on failure.  Returns 0 on success or
 * -ENODEV on the first mismatch. */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table columns: offset, flags, rw_mask (bits that must latch),
	 * ro_mask (bits that must not change).  Entries flagged
	 * BNX2_FL_NOT_5709 are skipped on the 5709. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeroes: rw bits must read back 0, ro bits
		 * must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back 1, ro bits
		 * must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3932
3933 static int
3934 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3935 {
3936         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3937                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3938         int i;
3939
3940         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3941                 u32 offset;
3942
3943                 for (offset = 0; offset < size; offset += 4) {
3944
3945                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3946
3947                         if (REG_RD_IND(bp, start + offset) !=
3948                                 test_pattern[i]) {
3949                                 return -ENODEV;
3950                         }
3951                 }
3952         }
3953         return 0;
3954 }
3955
3956 static int
3957 bnx2_test_memory(struct bnx2 *bp)
3958 {
3959         int ret = 0;
3960         int i;
3961         static struct mem_entry {
3962                 u32   offset;
3963                 u32   len;
3964         } mem_tbl_5706[] = {
3965                 { 0x60000,  0x4000 },
3966                 { 0xa0000,  0x3000 },
3967                 { 0xe0000,  0x4000 },
3968                 { 0x120000, 0x4000 },
3969                 { 0x1a0000, 0x4000 },
3970                 { 0x160000, 0x4000 },
3971                 { 0xffffffff, 0    },
3972         },
3973         mem_tbl_5709[] = {
3974                 { 0x60000,  0x4000 },
3975                 { 0xa0000,  0x3000 },
3976                 { 0xe0000,  0x4000 },
3977                 { 0x120000, 0x4000 },
3978                 { 0x1a0000, 0x4000 },
3979                 { 0xffffffff, 0    },
3980         };
3981         struct mem_entry *mem_tbl;
3982
3983         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3984                 mem_tbl = mem_tbl_5709;
3985         else
3986                 mem_tbl = mem_tbl_5706;
3987
3988         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3989                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3990                         mem_tbl[i].len)) != 0) {
3991                         return ret;
3992                 }
3993         }
3994
3995         return ret;
3996 }
3997
3998 #define BNX2_MAC_LOOPBACK       0
3999 #define BNX2_PHY_LOOPBACK       1
4000
/* Send one self-addressed test frame through MAC or PHY loopback and
 * verify it is received intact.  Returns 0 on success, -EINVAL for an
 * unknown mode, -ENOMEM if no skb, or -ENODEV if the frame is lost or
 * corrupted.  Interrupts are not used; the status block is polled after
 * forcing coalescing with COAL_NOW_WO_INT. */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size frame: our own MAC as destination, zeroed
	 * source/type bytes, then a known byte pattern as payload. */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so we can snapshot the RX consumer
	 * index before transmitting. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Post a single TX BD and ring the doorbell. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the frame time to loop back, then force another status
	 * block update and re-poll. */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX consumer must have caught up with the producer. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly one new frame must have arrived on the RX ring. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The hardware places an l2_fhdr before the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4119
4120 #define BNX2_MAC_LOOPBACK_FAILED        1
4121 #define BNX2_PHY_LOOPBACK_FAILED        2
4122 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4123                                          BNX2_PHY_LOOPBACK_FAILED)
4124
4125 static int
4126 bnx2_test_loopback(struct bnx2 *bp)
4127 {
4128         int rc = 0;
4129
4130         if (!netif_running(bp->dev))
4131                 return BNX2_LOOPBACK_FAILED;
4132
4133         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4134         spin_lock_bh(&bp->phy_lock);
4135         bnx2_init_phy(bp);
4136         spin_unlock_bh(&bp->phy_lock);
4137         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4138                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4139         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4140                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4141         return rc;
4142 }
4143
4144 #define NVRAM_SIZE 0x200
4145 #define CRC32_RESIDUAL 0xdebb20e3
4146
4147 static int
4148 bnx2_test_nvram(struct bnx2 *bp)
4149 {
4150         u32 buf[NVRAM_SIZE / 4];
4151         u8 *data = (u8 *) buf;
4152         int rc = 0;
4153         u32 magic, csum;
4154
4155         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4156                 goto test_nvram_done;
4157
4158         magic = be32_to_cpu(buf[0]);
4159         if (magic != 0x669955aa) {
4160                 rc = -ENODEV;
4161                 goto test_nvram_done;
4162         }
4163
4164         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4165                 goto test_nvram_done;
4166
4167         csum = ether_crc_le(0x100, data);
4168         if (csum != CRC32_RESIDUAL) {
4169                 rc = -ENODEV;
4170                 goto test_nvram_done;
4171         }
4172
4173         csum = ether_crc_le(0x100, data + 0x100);
4174         if (csum != CRC32_RESIDUAL) {
4175                 rc = -ENODEV;
4176         }
4177
4178 test_nvram_done:
4179         return rc;
4180 }
4181
4182 static int
4183 bnx2_test_link(struct bnx2 *bp)
4184 {
4185         u32 bmsr;
4186
4187         spin_lock_bh(&bp->phy_lock);
4188         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4189         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4190         spin_unlock_bh(&bp->phy_lock);
4191
4192         if (bmsr & BMSR_LSTATUS) {
4193                 return 0;
4194         }
4195         return -ENODEV;
4196 }
4197
4198 static int
4199 bnx2_test_intr(struct bnx2 *bp)
4200 {
4201         int i;
4202         u16 status_idx;
4203
4204         if (!netif_running(bp->dev))
4205                 return -ENODEV;
4206
4207         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4208
4209         /* This register is not touched during run-time. */
4210         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4211         REG_RD(bp, BNX2_HC_COMMAND);
4212
4213         for (i = 0; i < 10; i++) {
4214                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4215                         status_idx) {
4216
4217                         break;
4218                 }
4219
4220                 msleep_interruptible(10);
4221         }
4222         if (i < 10)
4223                 return 0;
4224
4225         return -ENODEV;
4226 }
4227
/* Periodic SerDes state machine for the 5706: implements parallel
 * detection.  If autoneg is on but the link is down while a signal is
 * detected and the partner sends no config words, force 1000/full and
 * mark PHY_PARALLEL_DETECT_FLAG; once the partner starts sending config
 * words again, re-enable autoneg.  Runs under phy_lock. */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previously started autoneg attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* NOTE(review): 0x1c/0x17/0x15 look like vendor
			 * shadow/expansion register accesses; the second
			 * 0x15 read presumably refreshes a latched value.
			 * Verify against the SerDes datasheet. */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1000 Mbps full duplex. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * sends config words, go back to autonegotiation. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4282
/* Periodic SerDes handling for the 5708, called from bnx2_timer
 * (timer context, hence spin_lock).  Only active on 2.5G-capable
 * PHYs: while the link is down with autoneg enabled, alternate
 * between forcing 2.5G full duplex and re-enabling autoneg,
 * apparently to find partners that do not autonegotiate.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previously started attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found no link: try forcing 2.5G. */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode found no link: back to autoneg,
			 * and wait two timer ticks before retrying.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4317
/* Driver heartbeat, run from a kernel timer.  Publishes a keep-alive
 * pulse to the bootcode via shared memory, snapshots the firmware Rx
 * drop counter, and drives the SerDes link state machines.  Work is
 * skipped (but the timer still rearmed) while intr_sem is raised,
 * i.e. while the device is quiesced.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Bump and publish the keep-alive sequence number. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	/* Mirror the firmware's Rx drop count into the stats block. */
	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4345
4346 /* Called with rtnl_lock */
/* net_device open: power up the chip, allocate rings, hook up the
 * interrupt (MSI where supported, with a run-time fallback to INTx if
 * the MSI test fails), initialize the NIC and start the Tx queue.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* Power up and keep interrupts masked during setup. */
	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Prefer MSI, except on 5706 A0/A1 or when disabled via the
	 * disable_msi module parameter.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind irq/MSI and memory on init failure. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Reinitialize the chip and retry with INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4441
/* Work-queue handler scheduled from bnx2_tx_timeout: stop the netif,
 * reinitialize the chip, and restart.  in_reset_task is polled by
 * bnx2_close so it can wait for this work to finish without calling
 * flush_scheduled_work (which can deadlock under rtnl_lock; see the
 * comment in bnx2_close).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* intr_sem is raised here; presumably the restart path lowers
	 * it when interrupts are re-enabled -- confirm against
	 * bnx2_netif_start.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4459
4460 static void
4461 bnx2_tx_timeout(struct net_device *dev)
4462 {
4463         struct bnx2 *bp = netdev_priv(dev);
4464
4465         /* This allows the netif to be shutdown gracefully before resetting */
4466         schedule_work(&bp->reset_task);
4467 }
4468
4469 #ifdef BCM_VLAN
4470 /* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the NIC before swapping in the new VLAN group, then
	 * reprogram the Rx mode to match and restart.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4483
4484 /* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the NIC, drop the VLAN id from the group, reprogram
	 * the Rx mode and restart.
	 */
	bnx2_netif_stop(bp);
	vlan_group_set_device(bp->vlgrp, vid, NULL);
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4496 #endif
4497
4498 /* Called with netif_tx_lock.
4499  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4500  * netif_wake_queue().
4501  */
/* Hard-start transmit: map the skb (linear part plus page fragments)
 * into a chain of tx buffer descriptors, set up checksum/VLAN/TSO
 * flags, and ring the doorbell.  See the locking comment above.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring filled;
	 * hitting this means the flow-control logic is broken.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	/* Build the per-packet flag word: checksum offload, VLAN tag
	 * (tag value in the upper 16 bits), and LSO settings.
	 */
	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size) &&
		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		/* TSO: the headers are rewritten below, so make sure
		 * they are not shared with a clone first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = 0;
		if (tcp_hdr(skb)->doff > 5)
			tcp_opt_len = tcp_optlen(skb);

		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Seed the headers with per-segment length and the TCP
		 * pseudo-header checksum, as the LSO path expects.
		 */
		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		/* Encode extra IP + TCP option length (in 32-bit
		 * words) starting at bit 8 of the flag word.
		 */
		if (tcp_opt_len || (iph->ihl > 5)) {
			vlan_tag_flags |= ((iph->ihl - 5) +
					   (tcp_opt_len >> 2)) << 8;
		}
	}
	else
	{
		mss = 0;
	}

	/* NOTE(review): pci_map_single/pci_map_page results are not
	 * checked for DMA mapping errors.
	 */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD carries the linear part plus the START flag. */
	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Order the MMIO doorbell writes before the lock is released. */
	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; wake it right back up if a
	 * concurrent completion already freed enough descriptors.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
4623
4624 /* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Tell the bootcode why we are resetting so it can leave the
	 * chip in the right state for wake-on-LAN.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Drop to D3hot; bnx2_open powers back up to D0. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4659
/* Assemble a 64-bit hardware counter from its _hi/_lo halves.  The
 * expansions are fully parenthesized so the macros compose safely
 * inside larger expressions (the originals relied on operator
 * precedence at every use site).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) +	\
	 (unsigned long) (ctr##_lo))

/* On 32-bit kernels a counter is truncated to its low 32 bits. */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4672
/* netdev get_stats hook: translate the chip's hardware counter block
 * into struct net_device_stats.  64-bit counters are read through
 * GET_NET_STATS (truncated to the low 32 bits on 32-bit kernels).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats block not available: return the last snapshot as-is. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the aggregate of the error classes above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as 0 on 5706 and 5708 A0;
	 * presumably the counter is unreliable there -- confirm
	 * against the chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include drops counted by the firmware (stat_FwRxDrop is
	 * refreshed from the chip in bnx2_timer).
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4748
4749 /* All ethtool functions called with rtnl_lock */
4750
4751 static int
4752 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4753 {
4754         struct bnx2 *bp = netdev_priv(dev);
4755
4756         cmd->supported = SUPPORTED_Autoneg;
4757         if (bp->phy_flags & PHY_SERDES_FLAG) {
4758                 cmd->supported |= SUPPORTED_1000baseT_Full |
4759                         SUPPORTED_FIBRE;
4760
4761                 cmd->port = PORT_FIBRE;
4762         }
4763         else {
4764                 cmd->supported |= SUPPORTED_10baseT_Half |
4765                         SUPPORTED_10baseT_Full |
4766                         SUPPORTED_100baseT_Half |
4767                         SUPPORTED_100baseT_Full |
4768                         SUPPORTED_1000baseT_Full |
4769                         SUPPORTED_TP;
4770
4771                 cmd->port = PORT_TP;
4772         }
4773
4774         cmd->advertising = bp->advertising;
4775
4776         if (bp->autoneg & AUTONEG_SPEED) {
4777                 cmd->autoneg = AUTONEG_ENABLE;
4778         }
4779         else {
4780                 cmd->autoneg = AUTONEG_DISABLE;
4781         }
4782
4783         if (netif_carrier_ok(dev)) {
4784                 cmd->speed = bp->line_speed;
4785                 cmd->duplex = bp->duplex;
4786         }
4787         else {
4788                 cmd->speed = -1;
4789                 cmd->duplex = -1;
4790         }
4791
4792         cmd->transceiver = XCVR_INTERNAL;
4793         cmd->phy_address = bp->phy_addr;
4794
4795         return 0;
4796 }
4797
/* ethtool set_settings: validate and apply autoneg/speed/duplex.
 * Working copies of the settings are used so nothing in *bp changes
 * if validation bails out with -EINVAL part way through.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* Any other mask: advertise everything the
			 * media supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode.  SerDes accepts only 1000/2500 full
		 * duplex (2500 only on 2.5G-capable PHYs); copper
		 * cannot be forced to 1000.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4873
4874 static void
4875 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4876 {
4877         struct bnx2 *bp = netdev_priv(dev);
4878
4879         strcpy(info->driver, DRV_MODULE_NAME);
4880         strcpy(info->version, DRV_MODULE_VERSION);
4881         strcpy(info->bus_info, pci_name(bp->pdev));
4882         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4883         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4884         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4885         info->fw_version[1] = info->fw_version[3] = '.';
4886         info->fw_version[5] = 0;
4887 }
4888
/* Size of the ethtool register-dump buffer. */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	/* The dump buffer has a fixed size regardless of the device. */
	return BNX2_REGDUMP_LEN;
}
4896
4897 static void
4898 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4899 {
4900         u32 *p = _p, i, offset;
4901         u8 *orig_p = _p;
4902         struct bnx2 *bp = netdev_priv(dev);
4903         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4904                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4905                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4906                                  0x1040, 0x1048, 0x1080, 0x10a4,
4907                                  0x1400, 0x1490, 0x1498, 0x14f0,
4908                                  0x1500, 0x155c, 0x1580, 0x15dc,
4909                                  0x1600, 0x1658, 0x1680, 0x16d8,
4910                                  0x1800, 0x1820, 0x1840, 0x1854,
4911                                  0x1880, 0x1894, 0x1900, 0x1984,
4912                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4913                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4914                                  0x2000, 0x2030, 0x23c0, 0x2400,
4915                                  0x2800, 0x2820, 0x2830, 0x2850,
4916                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4917                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4918                                  0x4080, 0x4090, 0x43c0, 0x4458,
4919                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4920                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4921                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4922                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4923                                  0x6800, 0x6848, 0x684c, 0x6860,
4924                                  0x6888, 0x6910, 0x8000 };
4925
4926         regs->version = 0;
4927
4928         memset(p, 0, BNX2_REGDUMP_LEN);
4929
4930         if (!netif_running(bp->dev))
4931                 return;
4932
4933         i = 0;
4934         offset = reg_boundaries[0];
4935         p += offset;
4936         while (offset < BNX2_REGDUMP_LEN) {
4937                 *p++ = REG_RD(bp, offset);
4938                 offset += 4;
4939                 if (offset == reg_boundaries[i + 1]) {
4940                         offset = reg_boundaries[i + 2];
4941                         p = (u32 *) (orig_p + offset);
4942                         i += 2;
4943                 }
4944         }
4945 }
4946
4947 static void
4948 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4949 {
4950         struct bnx2 *bp = netdev_priv(dev);
4951
4952         if (bp->flags & NO_WOL_FLAG) {
4953                 wol->supported = 0;
4954                 wol->wolopts = 0;
4955         }
4956         else {
4957                 wol->supported = WAKE_MAGIC;
4958                 if (bp->wol)
4959                         wol->wolopts = WAKE_MAGIC;
4960                 else
4961                         wol->wolopts = 0;
4962         }
4963         memset(&wol->sopass, 0, sizeof(wol->sopass));
4964 }
4965
4966 static int
4967 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4968 {
4969         struct bnx2 *bp = netdev_priv(dev);
4970
4971         if (wol->wolopts & ~WAKE_MAGIC)
4972                 return -EINVAL;
4973
4974         if (wol->wolopts & WAKE_MAGIC) {
4975                 if (bp->flags & NO_WOL_FLAG)
4976                         return -EINVAL;
4977
4978                 bp->wol = 1;
4979         }
4980         else {
4981                 bp->wol = 0;
4982         }
4983         return 0;
4984 }
4985
/* ethtool nway_reset: restart autonegotiation.  Only valid while
 * autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while the PHY sits in loopback for
		 * 20ms so the peer sees the link drop.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout state machine. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5020
5021 static int
5022 bnx2_get_eeprom_len(struct net_device *dev)
5023 {
5024         struct bnx2 *bp = netdev_priv(dev);
5025
5026         if (bp->flash_info == NULL)
5027                 return 0;
5028
5029         return (int) bp->flash_size;
5030 }
5031
5032 static int
5033 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5034                 u8 *eebuf)
5035 {
5036         struct bnx2 *bp = netdev_priv(dev);
5037         int rc;
5038
5039         /* parameters already validated in ethtool_get_eeprom */
5040
5041         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5042
5043         return rc;
5044 }
5045
5046 static int
5047 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5048                 u8 *eebuf)
5049 {
5050         struct bnx2 *bp = netdev_priv(dev);
5051         int rc;
5052
5053         /* parameters already validated in ethtool_set_eeprom */
5054
5055         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5056
5057         return rc;
5058 }
5059
5060 static int
5061 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5062 {
5063         struct bnx2 *bp = netdev_priv(dev);
5064
5065         memset(coal, 0, sizeof(struct ethtool_coalesce));
5066
5067         coal->rx_coalesce_usecs = bp->rx_ticks;
5068         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5069         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5070         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5071
5072         coal->tx_coalesce_usecs = bp->tx_ticks;
5073         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5074         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5075         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5076
5077         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5078
5079         return 0;
5080 }
5081
5082 static int
5083 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5084 {
5085         struct bnx2 *bp = netdev_priv(dev);
5086
5087         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5088         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5089
5090         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5091         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5092
5093         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5094         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5095
5096         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5097         if (bp->rx_quick_cons_trip_int > 0xff)
5098                 bp->rx_quick_cons_trip_int = 0xff;
5099
5100         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5101         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5102
5103         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5104         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5105
5106         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5107         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5108
5109         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5110         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5111                 0xff;
5112
5113         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5114         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5115         bp->stats_ticks &= 0xffff00;
5116
5117         if (netif_running(bp->dev)) {
5118                 bnx2_netif_stop(bp);
5119                 bnx2_init_nic(bp);
5120                 bnx2_netif_start(bp);
5121         }
5122
5123         return 0;
5124 }
5125
5126 static void
5127 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5128 {
5129         struct bnx2 *bp = netdev_priv(dev);
5130
5131         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5132         ering->rx_mini_max_pending = 0;
5133         ering->rx_jumbo_max_pending = 0;
5134
5135         ering->rx_pending = bp->rx_ring_size;
5136         ering->rx_mini_pending = 0;
5137         ering->rx_jumbo_pending = 0;
5138
5139         ering->tx_max_pending = MAX_TX_DESC_CNT;
5140         ering->tx_pending = bp->tx_ring_size;
5141 }
5142
/* ethtool set_ringparam handler: resize the RX/TX rings.  If the
 * interface is up, the chip is reset and all ring memory is freed and
 * re-allocated at the new sizes.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TX ring must be strictly larger than MAX_SKB_FRAGS so one
	 * maximally-fragmented packet always fits.
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		/* Quiesce the device and release the old rings. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			/* NOTE(review): on allocation failure the device is
			 * left stopped with its rings freed while the netdev
			 * still appears up — recovery requires a down/up
			 * cycle.  Consider closing the device here instead.
			 */
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5176
5177 static void
5178 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5179 {
5180         struct bnx2 *bp = netdev_priv(dev);
5181
5182         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5183         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5184         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5185 }
5186
5187 static int
5188 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5189 {
5190         struct bnx2 *bp = netdev_priv(dev);
5191
5192         bp->req_flow_ctrl = 0;
5193         if (epause->rx_pause)
5194                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5195         if (epause->tx_pause)
5196                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5197
5198         if (epause->autoneg) {
5199                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5200         }
5201         else {
5202                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5203         }
5204
5205         spin_lock_bh(&bp->phy_lock);
5206
5207         bnx2_setup_phy(bp);
5208
5209         spin_unlock_bh(&bp->phy_lock);
5210
5211         return 0;
5212 }
5213
/* ethtool get_rx_csum handler: report whether RX checksum offload is on. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5221
/* ethtool set_rx_csum handler: enable/disable RX checksum offload.  Only
 * the flag is stored here; the RX path consults it per packet.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5230
5231 static int
5232 bnx2_set_tso(struct net_device *dev, u32 data)
5233 {
5234         if (data)
5235                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5236         else
5237                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5238         return 0;
5239 }
5240
#define BNX2_NUM_STATS 46

/* Names reported for ETH_SS_STATS.  The order here must match
 * bnx2_stats_offset_arr and the per-chip length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5293
/* Offset of a counter within struct statistics_block, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter in the hardware statistics block, in the
 * same order as bnx2_stats_str_arr.  64-bit counters point at their
 * high word; the low word follows at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5344
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Per-counter widths in bytes for each chip family: 8 = 64-bit counter,
 * 4 = 32-bit counter, 0 = counter skipped (reported as 0).  Order
 * matches bnx2_stats_str_arr/bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5363
#define BNX2_NUM_TESTS 6

/* Names reported for ETH_SS_TEST; order matches the buf[] slots filled
 * in bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5376
/* ethtool self_test_count handler: number of results bnx2_self_test fills. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5382
/* ethtool self_test handler.  One u64 result per test in buf[], indexed
 * in the order of bnx2_tests_str_arr (0 = pass).  Offline tests reset
 * the chip into diagnostic mode and interrupt traffic for their
 * duration; online tests run against the live device.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* The loopback test's non-zero failure code is reported
		 * directly as the result.
		 */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or leave the chip reset if the
		 * interface is administratively down.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (at most ~7 seconds) before the online
		 * link test below.
		 */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests: run whether or not the offline pass was requested. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5438
5439 static void
5440 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5441 {
5442         switch (stringset) {
5443         case ETH_SS_STATS:
5444                 memcpy(buf, bnx2_stats_str_arr,
5445                         sizeof(bnx2_stats_str_arr));
5446                 break;
5447         case ETH_SS_TEST:
5448                 memcpy(buf, bnx2_tests_str_arr,
5449                         sizeof(bnx2_tests_str_arr));
5450                 break;
5451         }
5452 }
5453
/* ethtool get_stats_count handler: number of counters in the stats set. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5459
5460 static void
5461 bnx2_get_ethtool_stats(struct net_device *dev,
5462                 struct ethtool_stats *stats, u64 *buf)
5463 {
5464         struct bnx2 *bp = netdev_priv(dev);
5465         int i;
5466         u32 *hw_stats = (u32 *) bp->stats_blk;
5467         u8 *stats_len_arr = NULL;
5468
5469         if (hw_stats == NULL) {
5470                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5471                 return;
5472         }
5473
5474         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5475             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5476             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5477             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5478                 stats_len_arr = bnx2_5706_stats_len_arr;
5479         else
5480                 stats_len_arr = bnx2_5708_stats_len_arr;
5481
5482         for (i = 0; i < BNX2_NUM_STATS; i++) {
5483                 if (stats_len_arr[i] == 0) {
5484                         /* skip this counter */
5485                         buf[i] = 0;
5486                         continue;
5487                 }
5488                 if (stats_len_arr[i] == 4) {
5489                         /* 4-byte counter */
5490                         buf[i] = (u64)
5491                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5492                         continue;
5493                 }
5494                 /* 8-byte counter */
5495                 buf[i] = (((u64) *(hw_stats +
5496                                         bnx2_stats_offset_arr[i])) << 32) +
5497                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5498         }
5499 }
5500
5501 static int
5502 bnx2_phys_id(struct net_device *dev, u32 data)
5503 {
5504         struct bnx2 *bp = netdev_priv(dev);
5505         int i;
5506         u32 save;
5507
5508         if (data == 0)
5509                 data = 2;
5510
5511         save = REG_RD(bp, BNX2_MISC_CFG);
5512         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5513
5514         for (i = 0; i < (data * 2); i++) {
5515                 if ((i % 2) == 0) {
5516                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5517                 }
5518                 else {
5519                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5520                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5521                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5522                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5523                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5524                                 BNX2_EMAC_LED_TRAFFIC);
5525                 }
5526                 msleep_interruptible(500);
5527                 if (signal_pending(current))
5528                         break;
5529         }
5530         REG_WR(bp, BNX2_EMAC_LED, 0);
5531         REG_WR(bp, BNX2_MISC_CFG, save);
5532         return 0;
5533 }
5534
/* ethtool operations table; generic ethtool_op_* helpers are used where
 * no device-specific handling is needed.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5570
/* Called with rtnl_lock */
/* MII ioctl handler: supports SIOCGMIIPHY (report PHY address),
 * SIOCGMIIREG (read a PHY register) and SIOCSMIIREG (write a PHY
 * register, CAP_NET_ADMIN only).  PHY access requires the device to be
 * running and is serialized by phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5618
5619 /* Called with rtnl_lock */
5620 static int
5621 bnx2_change_mac_addr(struct net_device *dev, void *p)
5622 {
5623         struct sockaddr *addr = p;
5624         struct bnx2 *bp = netdev_priv(dev);
5625
5626         if (!is_valid_ether_addr(addr->sa_data))
5627                 return -EINVAL;
5628
5629         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5630         if (netif_running(dev))
5631                 bnx2_set_mac_addr(bp);
5632
5633         return 0;
5634 }
5635
5636 /* Called with rtnl_lock */
5637 static int
5638 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5639 {
5640         struct bnx2 *bp = netdev_priv(dev);
5641
5642         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5643                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5644                 return -EINVAL;
5645
5646         dev->mtu = new_mtu;
5647         if (netif_running(dev)) {
5648                 bnx2_netif_stop(bp);
5649
5650                 bnx2_init_nic(bp);
5651
5652                 bnx2_netif_start(bp);
5653         }
5654         return 0;
5655 }
5656
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: run the interrupt handler with the device's IRQ
 * masked so it can be called from contexts where interrupts must not
 * fire (e.g. netconsole).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5668
5669 static void __devinit
5670 bnx2_get_5709_media(struct bnx2 *bp)
5671 {
5672         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5673         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5674         u32 strap;
5675
5676         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5677                 return;
5678         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5679                 bp->phy_flags |= PHY_SERDES_FLAG;
5680                 return;
5681         }
5682
5683         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5684                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5685         else
5686                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5687
5688         if (PCI_FUNC(bp->pdev->devfn) == 0) {
5689                 switch (strap) {
5690                 case 0x4:
5691                 case 0x5:
5692                 case 0x6:
5693                         bp->phy_flags |= PHY_SERDES_FLAG;
5694                         return;
5695                 }
5696         } else {
5697                 switch (strap) {
5698                 case 0x1:
5699                 case 0x2:
5700                 case 0x4:
5701                         bp->phy_flags |= PHY_SERDES_FLAG;
5702                         return;
5703                 }
5704         }
5705 }
5706
5707 static int __devinit
5708 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5709 {
5710         struct bnx2 *bp;
5711         unsigned long mem_len;
5712         int rc;
5713         u32 reg;
5714         u64 dma_mask, persist_dma_mask;
5715
5716         SET_MODULE_OWNER(dev);
5717         SET_NETDEV_DEV(dev, &pdev->dev);
5718         bp = netdev_priv(dev);
5719
5720         bp->flags = 0;
5721         bp->phy_flags = 0;
5722
5723         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5724         rc = pci_enable_device(pdev);
5725         if (rc) {
5726                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5727                 goto err_out;
5728         }
5729
5730         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5731                 dev_err(&pdev->dev,
5732                         "Cannot find PCI device base address, aborting.\n");
5733                 rc = -ENODEV;
5734                 goto err_out_disable;
5735         }
5736
5737         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5738         if (rc) {
5739                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5740                 goto err_out_disable;
5741         }
5742
5743         pci_set_master(pdev);
5744
5745         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5746         if (bp->pm_cap == 0) {
5747                 dev_err(&pdev->dev,
5748                         "Cannot find power management capability, aborting.\n");
5749                 rc = -EIO;
5750                 goto err_out_release;
5751         }
5752
5753         bp->dev = dev;
5754         bp->pdev = pdev;
5755
5756         spin_lock_init(&bp->phy_lock);
5757         INIT_WORK(&bp->reset_task, bnx2_reset_task);
5758
5759         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5760         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5761         dev->mem_end = dev->mem_start + mem_len;
5762         dev->irq = pdev->irq;
5763
5764         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5765
5766         if (!bp->regview) {
5767                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5768                 rc = -ENOMEM;
5769                 goto err_out_release;
5770         }
5771
5772         /* Configure byte swap and enable write to the reg_window registers.
5773          * Rely on CPU to do target byte swapping on big endian systems
5774          * The chip's target access swapping will not swap all accesses
5775          */
5776         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5777                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5778                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5779
5780         bnx2_set_power_state(bp, PCI_D0);
5781
5782         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5783
5784         if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5785                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5786                 if (bp->pcix_cap == 0) {
5787                         dev_err(&pdev->dev,
5788                                 "Cannot find PCIX capability, aborting.\n");
5789                         rc = -EIO;
5790                         goto err_out_unmap;
5791                 }
5792         }
5793
5794         /* 5708 cannot support DMA addresses > 40-bit.  */
5795         if (CHIP_NUM(bp) == CHIP_NUM_5708)
5796                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
5797         else
5798                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
5799
5800         /* Configure DMA attributes. */
5801         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
5802                 dev->features |= NETIF_F_HIGHDMA;
5803                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
5804                 if (rc) {
5805                         dev_err(&pdev->dev,
5806                                 "pci_set_consistent_dma_mask failed, aborting.\n");
5807                         goto err_out_unmap;
5808                 }
5809         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
5810                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5811                 goto err_out_unmap;
5812         }
5813
5814         /* Get bus information. */
5815         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5816         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5817                 u32 clkreg;
5818
5819                 bp->flags |= PCIX_FLAG;
5820
5821                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5822
5823                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5824                 switch (clkreg) {
5825                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5826                         bp->bus_speed_mhz = 133;
5827                         break;
5828
5829                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5830                         bp->bus_speed_mhz = 100;
5831                         break;
5832
5833                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5834                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5835                         bp->bus_speed_mhz = 66;
5836                         break;
5837
5838                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5839                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5840                         bp->bus_speed_mhz = 50;
5841                         break;
5842
5843                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5844                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5845                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5846                         bp->bus_speed_mhz = 33;
5847                         break;
5848                 }
5849         }
5850         else {
5851                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5852                         bp->bus_speed_mhz = 66;
5853                 else
5854                         bp->bus_speed_mhz = 33;
5855         }
5856
5857         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5858                 bp->flags |= PCI_32BIT_FLAG;
5859
5860         /* 5706A0 may falsely detect SERR and PERR. */
5861         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5862                 reg = REG_RD(bp, PCI_COMMAND);
5863                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5864                 REG_WR(bp, PCI_COMMAND, reg);
5865         }
5866         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5867                 !(bp->flags & PCIX_FLAG)) {
5868
5869                 dev_err(&pdev->dev,
5870                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
5871                 goto err_out_unmap;
5872         }
5873
5874         bnx2_init_nvram(bp);
5875
5876         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5877
5878         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5879             BNX2_SHM_HDR_SIGNATURE_SIG) {
5880                 u32 off = PCI_FUNC(pdev->devfn) << 2;
5881
5882                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5883         } else
5884                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5885
5886         /* Get the permanent MAC address.  First we need to make sure the
5887          * firmware is actually running.
5888          */
5889         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5890
5891         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5892             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5893                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5894                 rc = -ENODEV;
5895                 goto err_out_unmap;
5896         }
5897
5898         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5899
5900         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5901         bp->mac_addr[0] = (u8) (reg >> 8);
5902         bp->mac_addr[1] = (u8) reg;
5903
5904         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5905         bp->mac_addr[2] = (u8) (reg >> 24);
5906         bp->mac_addr[3] = (u8) (reg >> 16);
5907         bp->mac_addr[4] = (u8) (reg >> 8);
5908         bp->mac_addr[5] = (u8) reg;
5909
5910         bp->tx_ring_size = MAX_TX_DESC_CNT;
5911         bnx2_set_rx_ring_size(bp, 255);
5912
5913         bp->rx_csum = 1;
5914
5915         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5916
5917         bp->tx_quick_cons_trip_int = 20;
5918         bp->tx_quick_cons_trip = 20;
5919         bp->tx_ticks_int = 80;
5920         bp->tx_ticks = 80;
5921
5922         bp->rx_quick_cons_trip_int = 6;
5923         bp->rx_quick_cons_trip = 6;
5924         bp->rx_ticks_int = 18;
5925         bp->rx_ticks = 18;
5926
5927         bp->stats_ticks = 1000000 & 0xffff00;
5928
5929         bp->timer_interval =  HZ;
5930         bp->current_interval =  HZ;
5931
5932         bp->phy_addr = 1;
5933
5934         /* Disable WOL support if we are running on a SERDES chip. */
5935         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5936                 bnx2_get_5709_media(bp);
5937         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5938                 bp->phy_flags |= PHY_SERDES_FLAG;
5939
5940         if (bp->phy_flags & PHY_SERDES_FLAG) {
5941                 bp->flags |= NO_WOL_FLAG;
5942                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5943                         bp->phy_addr = 2;
5944                         reg = REG_RD_IND(bp, bp->shmem_base +
5945                                          BNX2_SHARED_HW_CFG_CONFIG);
5946                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5947                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5948                 }
5949         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5950                    CHIP_NUM(bp) == CHIP_NUM_5708)
5951                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
5952         else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5953                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
5954
5955         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5956             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5957             (CHIP_ID(bp) == CHIP_ID_5708_B1))
5958                 bp->flags |= NO_WOL_FLAG;
5959
5960         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5961                 bp->tx_quick_cons_trip_int =
5962                         bp->tx_quick_cons_trip;
5963                 bp->tx_ticks_int = bp->tx_ticks;
5964                 bp->rx_quick_cons_trip_int =
5965                         bp->rx_quick_cons_trip;
5966                 bp->rx_ticks_int = bp->rx_ticks;
5967                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5968                 bp->com_ticks_int = bp->com_ticks;
5969                 bp->cmd_ticks_int = bp->cmd_ticks;
5970         }
5971
5972         /* Disable MSI on 5706 if AMD 8132 bridge is found.
5973          *
5974          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
5975          * with byte enables disabled on the unused 32-bit word.  This is legal
5976          * but causes problems on the AMD 8132 which will eventually stop
5977          * responding after a while.
5978          *
5979          * AMD believes this incompatibility is unique to the 5706, and
5980          * prefers to locally disable MSI rather than globally disabling it.
5981          */
5982         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5983                 struct pci_dev *amd_8132 = NULL;
5984
5985                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5986                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
5987                                                   amd_8132))) {
5988                         u8 rev;
5989
5990                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5991                         if (rev >= 0x10 && rev <= 0x13) {
5992                                 disable_msi = 1;
5993                                 pci_dev_put(amd_8132);
5994                                 break;
5995                         }
5996                 }
5997         }
5998
5999         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6000         bp->req_line_speed = 0;
6001         if (bp->phy_flags & PHY_SERDES_FLAG) {
6002                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6003
6004                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6005                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6006                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6007                         bp->autoneg = 0;
6008                         bp->req_line_speed = bp->line_speed = SPEED_1000;
6009                         bp->req_duplex = DUPLEX_FULL;
6010                 }
6011         }
6012         else {
6013                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6014         }
6015
6016         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6017
6018         init_timer(&bp->timer);
6019         bp->timer.expires = RUN_AT(bp->timer_interval);
6020         bp->timer.data = (unsigned long) bp;
6021         bp->timer.function = bnx2_timer;
6022
6023         return 0;
6024
6025 err_out_unmap:
6026         if (bp->regview) {
6027                 iounmap(bp->regview);
6028                 bp->regview = NULL;
6029         }
6030
6031 err_out_release:
6032         pci_release_regions(pdev);
6033
6034 err_out_disable:
6035         pci_disable_device(pdev);
6036         pci_set_drvdata(pdev, NULL);
6037
6038 err_out:
6039         return rc;
6040 }
6041
6042 static int __devinit
6043 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6044 {
6045         static int version_printed = 0;
6046         struct net_device *dev = NULL;
6047         struct bnx2 *bp;
6048         int rc, i;
6049
6050         if (version_printed++ == 0)
6051                 printk(KERN_INFO "%s", version);
6052
6053         /* dev zeroed in init_etherdev */
6054         dev = alloc_etherdev(sizeof(*bp));
6055
6056         if (!dev)
6057                 return -ENOMEM;
6058
6059         rc = bnx2_init_board(pdev, dev);
6060         if (rc < 0) {
6061                 free_netdev(dev);
6062                 return rc;
6063         }
6064
6065         dev->open = bnx2_open;
6066         dev->hard_start_xmit = bnx2_start_xmit;
6067         dev->stop = bnx2_close;
6068         dev->get_stats = bnx2_get_stats;
6069         dev->set_multicast_list = bnx2_set_rx_mode;
6070         dev->do_ioctl = bnx2_ioctl;
6071         dev->set_mac_address = bnx2_change_mac_addr;
6072         dev->change_mtu = bnx2_change_mtu;
6073         dev->tx_timeout = bnx2_tx_timeout;
6074         dev->watchdog_timeo = TX_TIMEOUT;
6075 #ifdef BCM_VLAN
6076         dev->vlan_rx_register = bnx2_vlan_rx_register;
6077         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6078 #endif
6079         dev->poll = bnx2_poll;
6080         dev->ethtool_ops = &bnx2_ethtool_ops;
6081         dev->weight = 64;
6082
6083         bp = netdev_priv(dev);
6084
6085 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6086         dev->poll_controller = poll_bnx2;
6087 #endif
6088
6089         if ((rc = register_netdev(dev))) {
6090                 dev_err(&pdev->dev, "Cannot register net device\n");
6091                 if (bp->regview)
6092                         iounmap(bp->regview);
6093                 pci_release_regions(pdev);
6094                 pci_disable_device(pdev);
6095                 pci_set_drvdata(pdev, NULL);
6096                 free_netdev(dev);
6097                 return rc;
6098         }
6099
6100         pci_set_drvdata(pdev, dev);
6101
6102         memcpy(dev->dev_addr, bp->mac_addr, 6);
6103         memcpy(dev->perm_addr, bp->mac_addr, 6);
6104         bp->name = board_info[ent->driver_data].name,
6105         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6106                 "IRQ %d, ",
6107                 dev->name,
6108                 bp->name,
6109                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6110                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6111                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6112                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6113                 bp->bus_speed_mhz,
6114                 dev->base_addr,
6115                 bp->pdev->irq);
6116
6117         printk("node addr ");
6118         for (i = 0; i < 6; i++)
6119                 printk("%2.2x", dev->dev_addr[i]);
6120         printk("\n");
6121
6122         dev->features |= NETIF_F_SG;
6123         dev->features |= NETIF_F_IP_CSUM;
6124 #ifdef BCM_VLAN
6125         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6126 #endif
6127         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6128
6129         netif_carrier_off(bp->dev);
6130
6131         return 0;
6132 }
6133
6134 static void __devexit
6135 bnx2_remove_one(struct pci_dev *pdev)
6136 {
6137         struct net_device *dev = pci_get_drvdata(pdev);
6138         struct bnx2 *bp = netdev_priv(dev);
6139
6140         flush_scheduled_work();
6141
6142         unregister_netdev(dev);
6143
6144         if (bp->regview)
6145                 iounmap(bp->regview);
6146
6147         free_netdev(dev);
6148         pci_release_regions(pdev);
6149         pci_disable_device(pdev);
6150         pci_set_drvdata(pdev, NULL);
6151 }
6152
6153 static int
6154 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6155 {
6156         struct net_device *dev = pci_get_drvdata(pdev);
6157         struct bnx2 *bp = netdev_priv(dev);
6158         u32 reset_code;
6159
6160         if (!netif_running(dev))
6161                 return 0;
6162
6163         flush_scheduled_work();
6164         bnx2_netif_stop(bp);
6165         netif_device_detach(dev);
6166         del_timer_sync(&bp->timer);
6167         if (bp->flags & NO_WOL_FLAG)
6168                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6169         else if (bp->wol)
6170                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6171         else
6172                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6173         bnx2_reset_chip(bp, reset_code);
6174         bnx2_free_skbs(bp);
6175         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6176         return 0;
6177 }
6178
6179 static int
6180 bnx2_resume(struct pci_dev *pdev)
6181 {
6182         struct net_device *dev = pci_get_drvdata(pdev);
6183         struct bnx2 *bp = netdev_priv(dev);
6184
6185         if (!netif_running(dev))
6186                 return 0;
6187
6188