[BNX2]: Add support for 5709 Serdes.
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define DRV_MODULE_NAME         "bnx2"
56 #define PFX DRV_MODULE_NAME     ": "
57 #define DRV_MODULE_VERSION      "1.5.8"
58 #define DRV_MODULE_RELDATE      "April 24, 2007"
59
60 #define RUN_AT(x) (jiffies + (x))
61
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT  (5*HZ)
64
/* Module banner printed at probe time, module metadata, and the single
 * load-time parameter (disable_msi, settable only at insmod: perm 0).
 */
65 static const char version[] __devinitdata =
66         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
/* Non-zero forces INTx interrupts even when the device supports MSI. */
73 static int disable_msi = 0;
74
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board type.  Stored in the driver_data field of bnx2_pci_tbl entries
 * and used to index board_info[] below, so the order here must match
 * that table exactly.
 */
78 typedef enum {
79         BCM5706 = 0,
80         NC370T,
81         NC370I,
82         BCM5706S,
83         NC370F,
84         BCM5708,
85         BCM5708S,
86         BCM5709,
87         BCM5709S,
88 } board_t;
89
/* Human-readable board names; entry order must match the board_t enum. */
90 /* indexed by board_t, above */
91 static const struct {
92         char *name;
93 } board_info[] __devinitdata = {
94         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95         { "HP NC370T Multifunction Gigabit Server Adapter" },
96         { "HP NC370i Multifunction Gigabit Server Adapter" },
97         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98         { "HP NC370F Multifunction Gigabit Server Adapter" },
99         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
103         };
104
/* PCI IDs claimed by this driver.  HP OEM entries (specific subsystem
 * IDs) are listed before the PCI_ANY_ID catch-alls for the same device
 * so they match first; the final field is the board_t index.
 */
105 static struct pci_device_id bnx2_pci_tbl[] = {
106         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
124         { 0, }
125 };
126
/* Known NVRAM parts.  The first word of each entry looks like the
 * strapping value used to select the part, and the last two fields are
 * clearly total size and a name string; the remaining words are raw
 * controller configuration constants (see struct flash_spec in bnx2.h
 * for field meanings -- not visible here, confirm against the header).
 */
127 static struct flash_spec flash_table[] =
128 {
129         /* Slow EEPROM */
130         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133          "EEPROM - slow"},
134         /* Expansion entry 0001 */
135         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138          "Entry 0001"},
139         /* Saifun SA25F010 (non-buffered flash) */
140         /* strap, cfg1, & write1 need updates */
141         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144          "Non-buffered flash (128kB)"},
145         /* Saifun SA25F020 (non-buffered flash) */
146         /* strap, cfg1, & write1 need updates */
147         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150          "Non-buffered flash (256kB)"},
151         /* Expansion entry 0100 */
152         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155          "Entry 0100"},
156         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166         /* Saifun SA25F005 (non-buffered flash) */
167         /* strap, cfg1, & write1 need updates */
168         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171          "Non-buffered flash (64kB)"},
172         /* Fast EEPROM */
173         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176          "EEPROM - fast"},
177         /* Expansion entry 1001 */
178         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181          "Entry 1001"},
182         /* Expansion entry 1010 */
183         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186          "Entry 1010"},
187         /* ATMEL AT45DB011B (buffered flash) */
188         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191          "Buffered flash (128kB)"},
192         /* Expansion entry 1100 */
193         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196          "Entry 1100"},
197         /* Expansion entry 1101 */
198         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201          "Entry 1101"},
202         /* Ateml Expansion entry 1110 */
203         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206          "Entry 1110 (Atmel)"},
207         /* ATMEL AT45DB021B (buffered flash) */
208         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211          "Buffered flash (256kB)"},
212 };
213
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217 {
218         u32 diff;
219
220         smp_mb();
221
222         /* The ring uses 256 indices for 255 entries, one of them
223          * needs to be skipped.
224          */
225         diff = bp->tx_prod - bp->tx_cons;
226         if (unlikely(diff >= TX_DESC_CNT)) {
227                 diff &= 0xffff;
228                 if (diff == TX_DESC_CNT)
229                         diff = MAX_TX_DESC_CNT;
230         }
231         return (bp->tx_ring_size - diff);
232 }
233
234 static u32
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236 {
237         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
238         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
239 }
240
241 static void
242 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
243 {
244         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
245         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
246 }
247
248 static void
249 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
250 {
251         offset += cid_addr;
252         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
253                 int i;
254
255                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
256                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
257                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
258                 for (i = 0; i < 5; i++) {
259                         u32 val;
260                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
261                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
262                                 break;
263                         udelay(5);
264                 }
265         } else {
266                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
267                 REG_WR(bp, BNX2_CTX_DATA, val);
268         }
269 }
270
271 static int
272 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
273 {
274         u32 val1;
275         int i, ret;
276
277         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
278                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
279                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
280
281                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
282                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
283
284                 udelay(40);
285         }
286
287         val1 = (bp->phy_addr << 21) | (reg << 16) |
288                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
289                 BNX2_EMAC_MDIO_COMM_START_BUSY;
290         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
291
292         for (i = 0; i < 50; i++) {
293                 udelay(10);
294
295                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
296                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
297                         udelay(5);
298
299                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
300                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
301
302                         break;
303                 }
304         }
305
306         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
307                 *val = 0x0;
308                 ret = -EBUSY;
309         }
310         else {
311                 *val = val1;
312                 ret = 0;
313         }
314
315         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
316                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
318
319                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
320                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
321
322                 udelay(40);
323         }
324
325         return ret;
326 }
327
328 static int
329 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
330 {
331         u32 val1;
332         int i, ret;
333
334         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
335                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
336                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
337
338                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
339                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
340
341                 udelay(40);
342         }
343
344         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
345                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
346                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
347         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
348
349         for (i = 0; i < 50; i++) {
350                 udelay(10);
351
352                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
353                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
354                         udelay(5);
355                         break;
356                 }
357         }
358
359         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
360                 ret = -EBUSY;
361         else
362                 ret = 0;
363
364         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
365                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
367
368                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
369                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
370
371                 udelay(40);
372         }
373
374         return ret;
375 }
376
/* Mask chip interrupts.  The trailing read of the same register is
 * presumably a flush of the posted write -- confirm against the 5706
 * programming docs.
 */
377 static void
378 bnx2_disable_int(struct bnx2 *bp)
379 {
380         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
381                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
382         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
383 }
384
/* Unmask chip interrupts.  The first write acks events up to
 * last_status_idx while still masked, the second unmasks; the final
 * COAL_NOW command appears to request an immediate coalescing pass so
 * events that arrived while masked are not lost (intent inferred from
 * the flag name -- confirm against the chip docs).
 */
385 static void
386 bnx2_enable_int(struct bnx2 *bp)
387 {
388         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
389                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
390                BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
391
392         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
393                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
394
395         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
396 }
397
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is raised before masking; presumably the ISR checks it and
 * bails out early -- confirm against the interrupt handler (not
 * visible in this chunk).  Balanced by the atomic_dec_and_test in
 * bnx2_netif_start.
 */
398 static void
399 bnx2_disable_int_sync(struct bnx2 *bp)
400 {
401         atomic_inc(&bp->intr_sem);
402         bnx2_disable_int(bp);
403         synchronize_irq(bp->pdev->irq);
404 }
405
/* Quiesce the interface: silence interrupts first, then stop NAPI
 * polling and the transmit queue.
 */
406 static void
407 bnx2_netif_stop(struct bnx2 *bp)
408 {
409         bnx2_disable_int_sync(bp);
410         if (netif_running(bp->dev)) {
411                 netif_poll_disable(bp->dev);
412                 netif_tx_disable(bp->dev);
413                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
414         }
415 }
416
/* Undo bnx2_netif_stop.  Only when the last outstanding disable is
 * released (intr_sem drops to zero) are the tx queue, NAPI polling and
 * interrupts re-enabled, so nested stop/start pairs balance correctly.
 */
417 static void
418 bnx2_netif_start(struct bnx2 *bp)
419 {
420         if (atomic_dec_and_test(&bp->intr_sem)) {
421                 if (netif_running(bp->dev)) {
422                         netif_wake_queue(bp->dev);
423                         netif_poll_enable(bp->dev);
424                         bnx2_enable_int(bp);
425                 }
426         }
427 }
428
429 static void
430 bnx2_free_mem(struct bnx2 *bp)
431 {
432         int i;
433
434         for (i = 0; i < bp->ctx_pages; i++) {
435                 if (bp->ctx_blk[i]) {
436                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
437                                             bp->ctx_blk[i],
438                                             bp->ctx_blk_mapping[i]);
439                         bp->ctx_blk[i] = NULL;
440                 }
441         }
442         if (bp->status_blk) {
443                 pci_free_consistent(bp->pdev, bp->status_stats_size,
444                                     bp->status_blk, bp->status_blk_mapping);
445                 bp->status_blk = NULL;
446                 bp->stats_blk = NULL;
447         }
448         if (bp->tx_desc_ring) {
449                 pci_free_consistent(bp->pdev,
450                                     sizeof(struct tx_bd) * TX_DESC_CNT,
451                                     bp->tx_desc_ring, bp->tx_desc_mapping);
452                 bp->tx_desc_ring = NULL;
453         }
454         kfree(bp->tx_buf_ring);
455         bp->tx_buf_ring = NULL;
456         for (i = 0; i < bp->rx_max_ring; i++) {
457                 if (bp->rx_desc_ring[i])
458                         pci_free_consistent(bp->pdev,
459                                             sizeof(struct rx_bd) * RX_DESC_CNT,
460                                             bp->rx_desc_ring[i],
461                                             bp->rx_desc_mapping[i]);
462                 bp->rx_desc_ring[i] = NULL;
463         }
464         vfree(bp->rx_buf_ring);
465         bp->rx_buf_ring = NULL;
466 }
467
468 static int
469 bnx2_alloc_mem(struct bnx2 *bp)
470 {
471         int i, status_blk_size;
472
473         bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
474                                   GFP_KERNEL);
475         if (bp->tx_buf_ring == NULL)
476                 return -ENOMEM;
477
478         bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
479                                                 sizeof(struct tx_bd) *
480                                                 TX_DESC_CNT,
481                                                 &bp->tx_desc_mapping);
482         if (bp->tx_desc_ring == NULL)
483                 goto alloc_mem_err;
484
485         bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
486                                   bp->rx_max_ring);
487         if (bp->rx_buf_ring == NULL)
488                 goto alloc_mem_err;
489
490         memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
491                                    bp->rx_max_ring);
492
493         for (i = 0; i < bp->rx_max_ring; i++) {
494                 bp->rx_desc_ring[i] =
495                         pci_alloc_consistent(bp->pdev,
496                                              sizeof(struct rx_bd) * RX_DESC_CNT,
497                                              &bp->rx_desc_mapping[i]);
498                 if (bp->rx_desc_ring[i] == NULL)
499                         goto alloc_mem_err;
500
501         }
502
503         /* Combine status and statistics blocks into one allocation. */
504         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
505         bp->status_stats_size = status_blk_size +
506                                 sizeof(struct statistics_block);
507
508         bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
509                                               &bp->status_blk_mapping);
510         if (bp->status_blk == NULL)
511                 goto alloc_mem_err;
512
513         memset(bp->status_blk, 0, bp->status_stats_size);
514
515         bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
516                                   status_blk_size);
517
518         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
519
520         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
521                 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
522                 if (bp->ctx_pages == 0)
523                         bp->ctx_pages = 1;
524                 for (i = 0; i < bp->ctx_pages; i++) {
525                         bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
526                                                 BCM_PAGE_SIZE,
527                                                 &bp->ctx_blk_mapping[i]);
528                         if (bp->ctx_blk[i] == NULL)
529                                 goto alloc_mem_err;
530                 }
531         }
532         return 0;
533
534 alloc_mem_err:
535         bnx2_free_mem(bp);
536         return -ENOMEM;
537 }
538
539 static void
540 bnx2_report_fw_link(struct bnx2 *bp)
541 {
542         u32 fw_link_status = 0;
543
544         if (bp->link_up) {
545                 u32 bmsr;
546
547                 switch (bp->line_speed) {
548                 case SPEED_10:
549                         if (bp->duplex == DUPLEX_HALF)
550                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
551                         else
552                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
553                         break;
554                 case SPEED_100:
555                         if (bp->duplex == DUPLEX_HALF)
556                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
557                         else
558                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
559                         break;
560                 case SPEED_1000:
561                         if (bp->duplex == DUPLEX_HALF)
562                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
563                         else
564                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
565                         break;
566                 case SPEED_2500:
567                         if (bp->duplex == DUPLEX_HALF)
568                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
569                         else
570                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
571                         break;
572                 }
573
574                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
575
576                 if (bp->autoneg) {
577                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
578
579                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
580                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
581
582                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
583                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
584                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
585                         else
586                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
587                 }
588         }
589         else
590                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
591
592         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
593 }
594
/* Log the link transition, update the netdev carrier state, and mirror
 * the result to firmware via bnx2_report_fw_link.  The printk sequence
 * builds one logical line: speed, duplex, then flow control.
 */
595 static void
596 bnx2_report_link(struct bnx2 *bp)
597 {
598         if (bp->link_up) {
599                 netif_carrier_on(bp->dev);
600                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
601
602                 printk("%d Mbps ", bp->line_speed);
603
604                 if (bp->duplex == DUPLEX_FULL)
605                         printk("full duplex");
606                 else
607                         printk("half duplex");
608
                /* Report receive-only, transmit-only, or both directions. */
609                 if (bp->flow_ctrl) {
610                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
611                                 printk(", receive ");
612                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
613                                         printk("& transmit ");
614                         }
615                         else {
616                                 printk(", transmit ");
617                         }
618                         printk("flow control ON");
619                 }
620                 printk("\n");
621         }
622         else {
623                 netif_carrier_off(bp->dev);
624                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
625         }
626
627         bnx2_report_fw_link(bp);
628 }
629
630 static void
631 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
632 {
633         u32 local_adv, remote_adv;
634
635         bp->flow_ctrl = 0;
636         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
637                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
638
639                 if (bp->duplex == DUPLEX_FULL) {
640                         bp->flow_ctrl = bp->req_flow_ctrl;
641                 }
642                 return;
643         }
644
645         if (bp->duplex != DUPLEX_FULL) {
646                 return;
647         }
648
649         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
650             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
651                 u32 val;
652
653                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
654                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
655                         bp->flow_ctrl |= FLOW_CTRL_TX;
656                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
657                         bp->flow_ctrl |= FLOW_CTRL_RX;
658                 return;
659         }
660
661         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
662         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
663
664         if (bp->phy_flags & PHY_SERDES_FLAG) {
665                 u32 new_local_adv = 0;
666                 u32 new_remote_adv = 0;
667
668                 if (local_adv & ADVERTISE_1000XPAUSE)
669                         new_local_adv |= ADVERTISE_PAUSE_CAP;
670                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
671                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
672                 if (remote_adv & ADVERTISE_1000XPAUSE)
673                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
674                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
675                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
676
677                 local_adv = new_local_adv;
678                 remote_adv = new_remote_adv;
679         }
680
681         /* See Table 28B-3 of 802.3ab-1999 spec. */
682         if (local_adv & ADVERTISE_PAUSE_CAP) {
683                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
684                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
685                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
686                         }
687                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
688                                 bp->flow_ctrl = FLOW_CTRL_RX;
689                         }
690                 }
691                 else {
692                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
693                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
694                         }
695                 }
696         }
697         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
698                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
699                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
700
701                         bp->flow_ctrl = FLOW_CTRL_TX;
702                 }
703         }
704 }
705
706 static int
707 bnx2_5709s_linkup(struct bnx2 *bp)
708 {
709         u32 val, speed;
710
711         bp->link_up = 1;
712
713         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
714         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
715         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
716
717         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
718                 bp->line_speed = bp->req_line_speed;
719                 bp->duplex = bp->req_duplex;
720                 return 0;
721         }
722         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
723         switch (speed) {
724                 case MII_BNX2_GP_TOP_AN_SPEED_10:
725                         bp->line_speed = SPEED_10;
726                         break;
727                 case MII_BNX2_GP_TOP_AN_SPEED_100:
728                         bp->line_speed = SPEED_100;
729                         break;
730                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
731                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
732                         bp->line_speed = SPEED_1000;
733                         break;
734                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
735                         bp->line_speed = SPEED_2500;
736                         break;
737         }
738         if (val & MII_BNX2_GP_TOP_AN_FD)
739                 bp->duplex = DUPLEX_FULL;
740         else
741                 bp->duplex = DUPLEX_HALF;
742         return 0;
743 }
744
745 static int
746 bnx2_5708s_linkup(struct bnx2 *bp)
747 {
748         u32 val;
749
750         bp->link_up = 1;
751         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
752         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
753                 case BCM5708S_1000X_STAT1_SPEED_10:
754                         bp->line_speed = SPEED_10;
755                         break;
756                 case BCM5708S_1000X_STAT1_SPEED_100:
757                         bp->line_speed = SPEED_100;
758                         break;
759                 case BCM5708S_1000X_STAT1_SPEED_1G:
760                         bp->line_speed = SPEED_1000;
761                         break;
762                 case BCM5708S_1000X_STAT1_SPEED_2G5:
763                         bp->line_speed = SPEED_2500;
764                         break;
765         }
766         if (val & BCM5708S_1000X_STAT1_FD)
767                 bp->duplex = DUPLEX_FULL;
768         else
769                 bp->duplex = DUPLEX_HALF;
770
771         return 0;
772 }
773
774 static int
775 bnx2_5706s_linkup(struct bnx2 *bp)
776 {
777         u32 bmcr, local_adv, remote_adv, common;
778
779         bp->link_up = 1;
780         bp->line_speed = SPEED_1000;
781
782         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
783         if (bmcr & BMCR_FULLDPLX) {
784                 bp->duplex = DUPLEX_FULL;
785         }
786         else {
787                 bp->duplex = DUPLEX_HALF;
788         }
789
790         if (!(bmcr & BMCR_ANENABLE)) {
791                 return 0;
792         }
793
794         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
795         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
796
797         common = local_adv & remote_adv;
798         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
799
800                 if (common & ADVERTISE_1000XFULL) {
801                         bp->duplex = DUPLEX_FULL;
802                 }
803                 else {
804                         bp->duplex = DUPLEX_HALF;
805                 }
806         }
807
808         return 0;
809 }
810
811 static int
812 bnx2_copper_linkup(struct bnx2 *bp)
813 {
814         u32 bmcr;
815
816         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
817         if (bmcr & BMCR_ANENABLE) {
818                 u32 local_adv, remote_adv, common;
819
820                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
821                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
822
823                 common = local_adv & (remote_adv >> 2);
824                 if (common & ADVERTISE_1000FULL) {
825                         bp->line_speed = SPEED_1000;
826                         bp->duplex = DUPLEX_FULL;
827                 }
828                 else if (common & ADVERTISE_1000HALF) {
829                         bp->line_speed = SPEED_1000;
830                         bp->duplex = DUPLEX_HALF;
831                 }
832                 else {
833                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
834                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
835
836                         common = local_adv & remote_adv;
837                         if (common & ADVERTISE_100FULL) {
838                                 bp->line_speed = SPEED_100;
839                                 bp->duplex = DUPLEX_FULL;
840                         }
841                         else if (common & ADVERTISE_100HALF) {
842                                 bp->line_speed = SPEED_100;
843                                 bp->duplex = DUPLEX_HALF;
844                         }
845                         else if (common & ADVERTISE_10FULL) {
846                                 bp->line_speed = SPEED_10;
847                                 bp->duplex = DUPLEX_FULL;
848                         }
849                         else if (common & ADVERTISE_10HALF) {
850                                 bp->line_speed = SPEED_10;
851                                 bp->duplex = DUPLEX_HALF;
852                         }
853                         else {
854                                 bp->line_speed = 0;
855                                 bp->link_up = 0;
856                         }
857                 }
858         }
859         else {
860                 if (bmcr & BMCR_SPEED100) {
861                         bp->line_speed = SPEED_100;
862                 }
863                 else {
864                         bp->line_speed = SPEED_10;
865                 }
866                 if (bmcr & BMCR_FULLDPLX) {
867                         bp->duplex = DUPLEX_FULL;
868                 }
869                 else {
870                         bp->duplex = DUPLEX_HALF;
871                 }
872         }
873
874         return 0;
875 }
876
/* Program the EMAC to match the current link state recorded in bp
 * (speed, duplex, flow control), then acknowledge the link-change
 * interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX_LENGTHS: 0x2620 is the normal value; 0x26ff is used for
	 * gigabit half duplex -- presumably extended IPG/slot timing,
	 * confirm against Broadcom hardware docs. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* The 5706 has no dedicated 10M MII mode;
				 * it falls through to plain MII below. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
943
944 static void
945 bnx2_enable_bmsr1(struct bnx2 *bp)
946 {
947         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
948             (CHIP_NUM(bp) == CHIP_NUM_5709))
949                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
950                                MII_BNX2_BLK_ADDR_GP_STATUS);
951 }
952
953 static void
954 bnx2_disable_bmsr1(struct bnx2 *bp)
955 {
956         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957             (CHIP_NUM(bp) == CHIP_NUM_5709))
958                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
960 }
961
/* Ensure 2.5G is advertised on a 2.5G-capable SerDes PHY.
 *
 * Returns 1 if 2.5G was already advertised (no register change made),
 * 0 if the bit had to be turned on or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	/* Keep the software advertising mask in sync when
	 * autonegotiating the speed. */
	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register lives in the OVER1G block; select it
	 * for the access and restore the default block afterwards. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
990
/* Ensure 2.5G is NOT advertised on a 2.5G-capable SerDes PHY.
 *
 * Returns 1 if the 2.5G bit was set and has now been cleared, 0 if it
 * was already clear or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	/* On 5709 the UP1 register lives in the OVER1G block; select it
	 * for the access and restore the default block afterwards. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1016
1017 static void
1018 bnx2_enable_forced_2g5(struct bnx2 *bp)
1019 {
1020         u32 bmcr;
1021
1022         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1023                 return;
1024
1025         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1026                 u32 val;
1027
1028                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1029                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1030                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1031                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1032                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1033                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1034
1035                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1036                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1037                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1038
1039         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1040                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1041                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1042         }
1043
1044         if (bp->autoneg & AUTONEG_SPEED) {
1045                 bmcr &= ~BMCR_ANENABLE;
1046                 if (bp->req_duplex == DUPLEX_FULL)
1047                         bmcr |= BMCR_FULLDPLX;
1048         }
1049         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1050 }
1051
1052 static void
1053 bnx2_disable_forced_2g5(struct bnx2 *bp)
1054 {
1055         u32 bmcr;
1056
1057         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1058                 return;
1059
1060         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1061                 u32 val;
1062
1063                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1064                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1065                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1066                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1067                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1068
1069                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1070                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1071                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1072
1073         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1074                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1075                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1076         }
1077
1078         if (bp->autoneg & AUTONEG_SPEED)
1079                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1080         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1081 }
1082
/* Re-evaluate link state: read link status from the PHY, resolve
 * speed/duplex/flow-control into bp, report any transition, and
 * reprogram the MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is forced up; nothing to resolve. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* enable/disable_bmsr1 switch to the GP_STATUS block on 5709
	 * SerDes around the reads.  The status register is read twice:
	 * the MII link-status bit is latching, so the first read may
	 * still show a stale link-down event. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: override the PHY's link bit with the EMAC's
	 * own link indication. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting so that
		 * autonegotiation restarts from a clean state. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1145
1146 static int
1147 bnx2_reset_phy(struct bnx2 *bp)
1148 {
1149         int i;
1150         u32 reg;
1151
1152         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1153
1154 #define PHY_RESET_MAX_WAIT 100
1155         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1156                 udelay(10);
1157
1158                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1159                 if (!(reg & BMCR_RESET)) {
1160                         udelay(20);
1161                         break;
1162                 }
1163         }
1164         if (i == PHY_RESET_MAX_WAIT) {
1165                 return -EBUSY;
1166         }
1167         return 0;
1168 }
1169
1170 static u32
1171 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1172 {
1173         u32 adv = 0;
1174
1175         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1176                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1177
1178                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1179                         adv = ADVERTISE_1000XPAUSE;
1180                 }
1181                 else {
1182                         adv = ADVERTISE_PAUSE_CAP;
1183                 }
1184         }
1185         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1186                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1187                         adv = ADVERTISE_1000XPSE_ASYM;
1188                 }
1189                 else {
1190                         adv = ADVERTISE_PAUSE_ASYM;
1191                 }
1192         }
1193         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1194                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1195                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1196                 }
1197                 else {
1198                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1199                 }
1200         }
1201         return adv;
1202 }
1203
/* Configure a SerDes PHY according to bp->autoneg and the requested
 * speed/duplex/flow-control settings.  Handles both forced-speed and
 * autonegotiated operation.  Called with bp->phy_lock held (it is
 * dropped around the msleep below).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G advertisement requires a link bounce
		 * so the partner sees the change. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is an undocumented
				 * BMCR bit here (BMCR_SPEED100 in standard
				 * MII terms) -- cleared when dropping back
				 * to 1G; confirm against 5709S docs. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner drops the link. */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Settings unchanged; just re-resolve flow
			 * control and reprogram the MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1315
/* Advertised-speed masks (ethtool ADVERTISED_* flags) for fibre and
 * copper ports. */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED                                        \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks (ADVERTISE_* bits) for 10/100 and
 * 1000 Mbps abilities. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1328
/* Configure a copper PHY according to bp->autoneg and the requested
 * speed/duplex settings.  Called with bp->phy_lock held (dropped
 * around the msleep below).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autonegotiation: build the desired advertisement and
		 * only touch the PHY if something actually changed. */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read twice: the link-status bit latches low. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1425
1426 static int
1427 bnx2_setup_phy(struct bnx2 *bp)
1428 {
1429         if (bp->loopback == MAC_LOOPBACK)
1430                 return 0;
1431
1432         if (bp->phy_flags & PHY_SERDES_FLAG) {
1433                 return (bnx2_setup_serdes_phy(bp));
1434         }
1435         else {
1436                 return (bnx2_setup_copper_phy(bp));
1437         }
1438 }
1439
/* One-time initialization of the 5709 SerDes PHY: map the MII
 * register offsets used by the rest of the driver, reset the PHY, and
 * program fiber mode, 2.5G capability, and next-page/CL73 settings.
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* On the 5709 SerDes the standard IEEE registers sit at offset
	 * 0x10 within the combo IEEE block -- TODO confirm against
	 * Broadcom 5709S documentation. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the address-expansion block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Disable auto-detect, force fiber mode. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the hardware supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Clause-73 BAM configuration. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the PHY pointing at the default IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1488
/* One-time initialization of the 5708 SerDes PHY: reset, enable fiber
 * mode with auto-detect, optionally enable 2.5G, and apply board and
 * chip-revision specific TX tweaks from shared memory config.  Always
 * returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared-memory config;
	 * applied only on backplane designs. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1546
/* One-time initialization of the 5706 SerDes PHY: reset, then program
 * packet-length handling depending on whether jumbo frames (mtu >
 * 1500) are in use.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	/* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 -- purpose not
	 * documented here; presumably a 5706-specific hardware
	 * workaround. */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		/* Registers 0x18/0x1c are vendor shadow registers; the
		 * magic values are from Broadcom and are not documented
		 * in this file. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1583
/* One-time initialization of a copper PHY: reset, apply optional CRC
 * and early-DAC workarounds, set extended packet length according to
 * the MTU, and enable ethernet@wirespeed.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-provided DSP write sequence (registers
		 * 0x15/0x17/0x18 are shadow/DSP access registers);
		 * magic values from Broadcom. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Clear bit 8 in the DSP expansion register 0x8 to
		 * disable early DAC. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1634
1635
/* Top-level PHY initialization: set default MII register offsets,
 * enable link attentions, read the PHY ID, run the chip-specific init
 * routine, and kick off PHY setup.  Returns the chip-specific init
 * routine's result (0 on success).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default register map; the 5709 SerDes init overrides these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	/* NOTE(review): setup is attempted even if init failed; rc from
	 * setup is discarded -- presumably intentional, confirm. */
	bnx2_setup_phy(bp);

	return rc;
}
1674
1675 static int
1676 bnx2_set_mac_loopback(struct bnx2 *bp)
1677 {
1678         u32 mac_mode;
1679
1680         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1681         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1682         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1683         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1684         bp->link_up = 1;
1685         return 0;
1686 }
1687
1688 static int bnx2_test_link(struct bnx2 *);
1689
/* Put the PHY into loopback at 1000 Mbps full duplex and configure
 * the EMAC for GMII with no forced link.
 *
 * Returns 0 on success or the error from the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY accesses are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Give the link up to ~1 second to come up in loopback. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear any MAC-level loopback/forcing and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1719
/* Send a message to the bootcode firmware via the shared-memory
 * driver mailbox and wait for its acknowledgement.
 *
 * @msg_data: message code/data; a fresh sequence number is OR'ed in.
 * @silent:   when non-zero, suppress the timeout printk.
 *
 * WAIT0-type messages return 0 as soon as the ack loop finishes,
 * without checking the timeout or the firmware status.  Otherwise
 * returns 0 on success, -EBUSY on ack timeout (after informing the
 * firmware), or -EIO if the firmware acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a new sequence number so the ack can
	 * be matched against this request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1762
/* Program the 5709 context host page table with the DMA addresses of
 * the context memory pages allocated by the driver.
 *
 * Each entry is written via the DATA0/DATA1 registers and committed
 * with a WRITE_REQ that the hardware clears when done; we poll up to
 * ~50us per entry.  Returns 0 on success, -EBUSY if the chip never
 * accepts an entry.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* (BCM_PAGE_BITS - 8) << 16 presumably encodes the host page
	 * size for the chip -- TODO confirm against chip docs.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page address plus the valid bit. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for the hardware to clear the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1796
/* Zero out all 96 on-chip connection contexts (pre-5709 chips).
 *
 * On 5706 A0 silicon some virtual context IDs map to different
 * physical context addresses -- the vcid & 0x8 remap below looks like
 * a workaround for that stepping; confirm against the errata.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the physical context page at virtual address 0
		 * so it can be zeroed through the CTX window.
		 */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		/* Restore the normal vcid -> pcid mapping. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1837
1838 static int
1839 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1840 {
1841         u16 *good_mbuf;
1842         u32 good_mbuf_cnt;
1843         u32 val;
1844
1845         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1846         if (good_mbuf == NULL) {
1847                 printk(KERN_ERR PFX "Failed to allocate memory in "
1848                                     "bnx2_alloc_bad_rbuf\n");
1849                 return -ENOMEM;
1850         }
1851
1852         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1853                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1854
1855         good_mbuf_cnt = 0;
1856
1857         /* Allocate a bunch of mbufs and save the good ones in an array. */
1858         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1859         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1860                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1861
1862                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1863
1864                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1865
1866                 /* The addresses with Bit 9 set are bad memory blocks. */
1867                 if (!(val & (1 << 9))) {
1868                         good_mbuf[good_mbuf_cnt] = (u16) val;
1869                         good_mbuf_cnt++;
1870                 }
1871
1872                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1873         }
1874
1875         /* Free the good ones back to the mbuf pool thus discarding
1876          * all the bad ones. */
1877         while (good_mbuf_cnt) {
1878                 good_mbuf_cnt--;
1879
1880                 val = good_mbuf[good_mbuf_cnt];
1881                 val = (val << 9) | val | 1;
1882
1883                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1884         }
1885         kfree(good_mbuf);
1886         return 0;
1887 }
1888
1889 static void
1890 bnx2_set_mac_addr(struct bnx2 *bp)
1891 {
1892         u32 val;
1893         u8 *mac_addr = bp->dev->dev_addr;
1894
1895         val = (mac_addr[0] << 8) | mac_addr[1];
1896
1897         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1898
1899         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1900                 (mac_addr[4] << 8) | mac_addr[5];
1901
1902         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1903 }
1904
/* Allocate and DMA-map a new receive skb for ring slot @index and
 * point the corresponding rx_bd at it.  Advances rx_prod_bseq; the
 * caller is responsible for posting the new producer to the chip.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data buffer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1935
/* Handle a link-state attention.  The attention bit and its ack bit
 * disagree while a state change is unacknowledged; copy the new state
 * into the ack via the set/clear command registers and re-evaluate
 * the link.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		/* Acknowledge: make the ack bit track the new state. */
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
1957
/* Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the software consumer to the hardware
 * consumer reported in the status block, unmapping and freeing each
 * completed skb, then wakes the netdev queue if enough descriptors
 * became free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* Skip over the reserved last entry of a ring page. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD has not completed
			 * yet (signed 16-bit ring-distance compare).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each page fragment of the packet. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware consumer to pick up new work. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to close the race with
		 * a concurrent bnx2_start_xmit() stopping the queue.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2045
/* Recycle an rx skb from consumer slot @cons to producer slot @prod
 * without reallocating: move the skb, its DMA mapping, and its
 * descriptor address so the buffer can be handed back to the chip.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header area back to the device before reposting. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2075
/* Process up to @budget received packets.
 *
 * Walks the RX ring from the software consumer to the hardware
 * consumer in the status block.  Error frames and frames that cannot
 * get a replacement buffer are recycled via bnx2_reuse_rx_skb().
 * Finishes by posting the new producer index and byte sequence to
 * the chip mailbox.  Returns the number of packets handed to the
 * stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip over the reserved last entry of a ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for inspection. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with status and length. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Recycle the original buffer back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Got a replacement buffer; hand this one up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement buffer: drop and recycle. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Post the new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2223
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further interrupts until NAPI poll re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the actual work to the NAPI poll routine. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2246
/* INTx ISR.  May be invoked for a shared interrupt line, so it must
 * detect and reject interrupts that are not ours.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI poll re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the actual work to the NAPI poll routine. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2276
2277 static inline int
2278 bnx2_has_work(struct bnx2 *bp)
2279 {
2280         struct status_block *sblk = bp->status_blk;
2281
2282         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2283             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2284                 return 1;
2285
2286         if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2287             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
2288                 return 1;
2289
2290         return 0;
2291 }
2292
/* NAPI poll routine: service link attentions, reclaim TX
 * completions, then process up to the quota of RX packets.
 *
 * Returns 0 after completing all work (and re-arming interrupts),
 * or 1 to remain on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Attention bit differing from its ack means the link changed. */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single unmasking ack write is sufficient. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: two-step ack -- first update the index with
		 * interrupts still masked, then unmask with the
		 * second write.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2354
2355 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2356  * from set_multicast.
2357  */
/* Program the receive filters (promiscuous / all-multicast /
 * multicast hash / VLAN tag keeping) from the netdev flags and
 * multicast list, then commit the sort-user register.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in the frame only when no VLAN group is
	 * registered and ASF is not managing the device.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash with ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low byte of the CRC: bits 7:5
			 * select the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, reprogram, then enable the sort-user filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2429
2430 #define FW_BUF_SIZE     0x8000
2431
/* Allocate the scratch buffer and zlib stream used for firmware
 * decompression.  On partial failure, everything allocated so far is
 * released via the fall-through labels.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "uncompression.\n", bp->dev->name);
	return -ENOMEM;
}
2460
2461 static void
2462 bnx2_gunzip_end(struct bnx2 *bp)
2463 {
2464         kfree(bp->strm->workspace);
2465
2466         kfree(bp->strm);
2467         bp->strm = NULL;
2468
2469         if (bp->gunzip_buf) {
2470                 vfree(bp->gunzip_buf);
2471                 bp->gunzip_buf = NULL;
2472         }
2473 }
2474
/* Decompress a gzip-wrapped firmware image into bp->gunzip_buf.
 *
 * @zbuf, @len:      the compressed image.
 * @outbuf, @outlen: on return point at bp->gunzip_buf and the number
 *                   of decompressed bytes.
 *
 * Returns 0 on success, -EINVAL on a bad gzip header, or a zlib
 * error code.  NOTE(review): output is capped at FW_BUF_SIZE; an
 * image larger than that will not reach Z_STREAM_END and is reported
 * as an error.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Skip the fixed 10-byte gzip header... */
	n = 10;

#define FNAME	0x8
	/* ...plus the optional NUL-terminated original-name field. */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits: raw deflate, no zlib wrapper. */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2515
/* Load firmware into one of the two RV2P processors.
 *
 * Each instruction is two 32-bit words written through the
 * INSTR_HIGH/INSTR_LOW registers and committed at index i/8 with a
 * RDWR command to the selected processor.  The processor is left in
 * reset; it is un-stalled later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2548
2549 static int
2550 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2551 {
2552         u32 offset;
2553         u32 val;
2554         int rc;
2555
2556         /* Halt the CPU. */
2557         val = REG_RD_IND(bp, cpu_reg->mode);
2558         val |= cpu_reg->mode_value_halt;
2559         REG_WR_IND(bp, cpu_reg->mode, val);
2560         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2561
2562         /* Load the Text area. */
2563         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2564         if (fw->gz_text) {
2565                 u32 text_len;
2566                 void *text;
2567
2568                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2569                                  &text_len);
2570                 if (rc)
2571                         return rc;
2572
2573                 fw->text = text;
2574         }
2575         if (fw->gz_text) {
2576                 int j;
2577
2578                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2579                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2580                 }
2581         }
2582
2583         /* Load the Data area. */
2584         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2585         if (fw->data) {
2586                 int j;
2587
2588                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2589                         REG_WR_IND(bp, offset, fw->data[j]);
2590                 }
2591         }
2592
2593         /* Load the SBSS area. */
2594         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2595         if (fw->sbss) {
2596                 int j;
2597
2598                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2599                         REG_WR_IND(bp, offset, fw->sbss[j]);
2600                 }
2601         }
2602
2603         /* Load the BSS area. */
2604         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2605         if (fw->bss) {
2606                 int j;
2607
2608                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2609                         REG_WR_IND(bp, offset, fw->bss[j]);
2610                 }
2611         }
2612
2613         /* Load the Read-Only area. */
2614         offset = cpu_reg->spad_base +
2615                 (fw->rodata_addr - cpu_reg->mips_view_base);
2616         if (fw->rodata) {
2617                 int j;
2618
2619                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2620                         REG_WR_IND(bp, offset, fw->rodata[j]);
2621                 }
2622         }
2623
2624         /* Clear the pre-fetch instruction. */
2625         REG_WR_IND(bp, cpu_reg->inst, 0);
2626         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2627
2628         /* Start the CPU. */
2629         val = REG_RD_IND(bp, cpu_reg->mode);
2630         val &= ~cpu_reg->mode_value_halt;
2631         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2632         REG_WR_IND(bp, cpu_reg->mode, val);
2633
2634         return 0;
2635 }
2636
/* Initialize all on-chip processors: decompress and download the RV2P
 * microcode, then load firmware into the RX, TX, TX patch-up,
 * completion, and (5709 only) command processors.  Chooses the _09
 * firmware images on 5709 chips and the _06 images otherwise.
 * Returns 0 on success or a negative error from decompression/loading.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Set up the shared gunzip workspace used by all loads below. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (only present on 5709). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
/* Success path falls through here too: the gunzip workspace is always
 * released before returning.
 */
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2781
/* Transition the device between PCI power states D0 and D3hot.
 *
 * For D0: clears the PM state bits (with the required delay when coming
 * out of D3hot) and disables magic/ACPI packet reception.
 * For D3hot: if Wake-on-LAN is enabled, temporarily forces 10/100
 * autoneg, programs the MAC for magic/ACPI packet wakeup, enables
 * all-multicast reception, and arms PME; then notifies firmware and
 * writes the new power state to PMCSR.
 *
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packets and stop looking
		 * for magic packets now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Renegotiate the link at 10/100 for WOL, saving
			 * and restoring the user's autoneg settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program the RX sort rule: clear, set value,
			 * then enable (hardware-required sequence).
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell firmware whether we are suspending with WOL. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (state bits = 3) when WOL is
		 * on; later chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2908
2909 static int
2910 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2911 {
2912         u32 val;
2913         int j;
2914
2915         /* Request access to the flash interface. */
2916         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2917         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2918                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2919                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2920                         break;
2921
2922                 udelay(5);
2923         }
2924
2925         if (j >= NVRAM_TIMEOUT_COUNT)
2926                 return -EBUSY;
2927
2928         return 0;
2929 }
2930
2931 static int
2932 bnx2_release_nvram_lock(struct bnx2 *bp)
2933 {
2934         int j;
2935         u32 val;
2936
2937         /* Relinquish nvram interface. */
2938         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2939
2940         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2941                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2942                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2943                         break;
2944
2945                 udelay(5);
2946         }
2947
2948         if (j >= NVRAM_TIMEOUT_COUNT)
2949                 return -EBUSY;
2950
2951         return 0;
2952 }
2953
2954
2955 static int
2956 bnx2_enable_nvram_write(struct bnx2 *bp)
2957 {
2958         u32 val;
2959
2960         val = REG_RD(bp, BNX2_MISC_CFG);
2961         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2962
2963         if (!bp->flash_info->buffered) {
2964                 int j;
2965
2966                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2967                 REG_WR(bp, BNX2_NVM_COMMAND,
2968                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2969
2970                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2971                         udelay(5);
2972
2973                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2974                         if (val & BNX2_NVM_COMMAND_DONE)
2975                                 break;
2976                 }
2977
2978                 if (j >= NVRAM_TIMEOUT_COUNT)
2979                         return -EBUSY;
2980         }
2981         return 0;
2982 }
2983
2984 static void
2985 bnx2_disable_nvram_write(struct bnx2 *bp)
2986 {
2987         u32 val;
2988
2989         val = REG_RD(bp, BNX2_MISC_CFG);
2990         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2991 }
2992
2993
2994 static void
2995 bnx2_enable_nvram_access(struct bnx2 *bp)
2996 {
2997         u32 val;
2998
2999         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3000         /* Enable both bits, even on read. */
3001         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3002                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3003 }
3004
3005 static void
3006 bnx2_disable_nvram_access(struct bnx2 *bp)
3007 {
3008         u32 val;
3009
3010         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3011         /* Disable both bits, even after read. */
3012         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3013                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3014                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3015 }
3016
3017 static int
3018 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3019 {
3020         u32 cmd;
3021         int j;
3022
3023         if (bp->flash_info->buffered)
3024                 /* Buffered flash, no erase needed */
3025                 return 0;
3026
3027         /* Build an erase command */
3028         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3029               BNX2_NVM_COMMAND_DOIT;
3030
3031         /* Need to clear DONE bit separately. */
3032         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3033
3034         /* Address of the NVRAM to read from. */
3035         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3036
3037         /* Issue an erase command. */
3038         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3039
3040         /* Wait for completion. */
3041         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3042                 u32 val;
3043
3044                 udelay(5);
3045
3046                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3047                 if (val & BNX2_NVM_COMMAND_DONE)
3048                         break;
3049         }
3050
3051         if (j >= NVRAM_TIMEOUT_COUNT)
3052                 return -EBUSY;
3053
3054         return 0;
3055 }
3056
3057 static int
3058 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3059 {
3060         u32 cmd;
3061         int j;
3062
3063         /* Build the command word. */
3064         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3065
3066         /* Calculate an offset of a buffered flash. */
3067         if (bp->flash_info->buffered) {
3068                 offset = ((offset / bp->flash_info->page_size) <<
3069                            bp->flash_info->page_bits) +
3070                           (offset % bp->flash_info->page_size);
3071         }
3072
3073         /* Need to clear DONE bit separately. */
3074         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3075
3076         /* Address of the NVRAM to read from. */
3077         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3078
3079         /* Issue a read command. */
3080         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3081
3082         /* Wait for completion. */
3083         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3084                 u32 val;
3085
3086                 udelay(5);
3087
3088                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3089                 if (val & BNX2_NVM_COMMAND_DONE) {
3090                         val = REG_RD(bp, BNX2_NVM_READ);
3091
3092                         val = be32_to_cpu(val);
3093                         memcpy(ret_val, &val, 4);
3094                         break;
3095                 }
3096         }
3097         if (j >= NVRAM_TIMEOUT_COUNT)
3098                 return -EBUSY;
3099
3100         return 0;
3101 }
3102
3103
3104 static int
3105 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3106 {
3107         u32 cmd, val32;
3108         int j;
3109
3110         /* Build the command word. */
3111         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3112
3113         /* Calculate an offset of a buffered flash. */
3114         if (bp->flash_info->buffered) {
3115                 offset = ((offset / bp->flash_info->page_size) <<
3116                           bp->flash_info->page_bits) +
3117                          (offset % bp->flash_info->page_size);
3118         }
3119
3120         /* Need to clear DONE bit separately. */
3121         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3122
3123         memcpy(&val32, val, 4);
3124         val32 = cpu_to_be32(val32);
3125
3126         /* Write the data. */
3127         REG_WR(bp, BNX2_NVM_WRITE, val32);
3128
3129         /* Address of the NVRAM to write to. */
3130         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3131
3132         /* Issue the write command. */
3133         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3134
3135         /* Wait for completion. */
3136         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3137                 udelay(5);
3138
3139                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3140                         break;
3141         }
3142         if (j >= NVRAM_TIMEOUT_COUNT)
3143                 return -EBUSY;
3144
3145         return 0;
3146 }
3147
/* Identify the attached flash/EEPROM device and configure the NVRAM
 * interface for it.  Matches the NVM_CFG1 strapping against the entries
 * of flash_table; if the interface has not yet been reconfigured, the
 * matching entry's config registers are programmed under the NVRAM
 * lock.  Also determines the usable flash size from shared hardware
 * config (falling back to the table's total_size).
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping mask applies. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j == entry_count means neither loop above found a match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size reported in shared hardware config; fall
	 * back to the table entry's total size if it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3225
3226 static int
3227 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3228                 int buf_size)
3229 {
3230         int rc = 0;
3231         u32 cmd_flags, offset32, len32, extra;
3232
3233         if (buf_size == 0)
3234                 return 0;
3235
3236         /* Request access to the flash interface. */
3237         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3238                 return rc;
3239
3240         /* Enable access to flash interface */
3241         bnx2_enable_nvram_access(bp);
3242
3243         len32 = buf_size;
3244         offset32 = offset;
3245         extra = 0;
3246
3247         cmd_flags = 0;
3248
3249         if (offset32 & 3) {
3250                 u8 buf[4];
3251                 u32 pre_len;
3252
3253                 offset32 &= ~3;
3254                 pre_len = 4 - (offset & 3);
3255
3256                 if (pre_len >= len32) {
3257                         pre_len = len32;
3258                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3259                                     BNX2_NVM_COMMAND_LAST;
3260                 }
3261                 else {
3262                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3263                 }
3264
3265                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3266
3267                 if (rc)
3268                         return rc;
3269
3270                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3271
3272                 offset32 += 4;
3273                 ret_buf += pre_len;
3274                 len32 -= pre_len;
3275         }
3276         if (len32 & 3) {
3277                 extra = 4 - (len32 & 3);
3278                 len32 = (len32 + 4) & ~3;
3279         }
3280
3281         if (len32 == 4) {
3282                 u8 buf[4];
3283
3284                 if (cmd_flags)
3285                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3286                 else
3287                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3288                                     BNX2_NVM_COMMAND_LAST;
3289
3290                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3291
3292                 memcpy(ret_buf, buf, 4 - extra);
3293         }
3294         else if (len32 > 0) {
3295                 u8 buf[4];
3296
3297                 /* Read the first word. */
3298                 if (cmd_flags)
3299                         cmd_flags = 0;
3300                 else
3301                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3302
3303                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3304
3305                 /* Advance to the next dword. */
3306                 offset32 += 4;
3307                 ret_buf += 4;
3308                 len32 -= 4;
3309
3310                 while (len32 > 4 && rc == 0) {
3311                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3312
3313                         /* Advance to the next dword. */
3314                         offset32 += 4;
3315                         ret_buf += 4;
3316                         len32 -= 4;
3317                 }
3318
3319                 if (rc)
3320                         return rc;
3321
3322                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3323                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3324
3325                 memcpy(ret_buf, buf, 4 - extra);
3326         }
3327
3328         /* Disable access to flash interface */
3329         bnx2_disable_nvram_access(bp);
3330
3331         bnx2_release_nvram_lock(bp);
3332
3333         return rc;
3334 }
3335
3336 static int
3337 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3338                 int buf_size)
3339 {
3340         u32 written, offset32, len32;
3341         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3342         int rc = 0;
3343         int align_start, align_end;
3344
3345         buf = data_buf;
3346         offset32 = offset;
3347         len32 = buf_size;
3348         align_start = align_end = 0;
3349
3350         if ((align_start = (offset32 & 3))) {
3351                 offset32 &= ~3;
3352                 len32 += align_start;
3353                 if (len32 < 4)
3354                         len32 = 4;
3355                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3356                         return rc;
3357         }
3358
3359         if (len32 & 3) {
3360                 align_end = 4 - (len32 & 3);
3361                 len32 += align_end;
3362                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3363                         return rc;
3364         }
3365
3366         if (align_start || align_end) {
3367                 align_buf = kmalloc(len32, GFP_KERNEL);
3368                 if (align_buf == NULL)
3369                         return -ENOMEM;
3370                 if (align_start) {
3371                         memcpy(align_buf, start, 4);
3372                 }
3373                 if (align_end) {
3374                         memcpy(align_buf + len32 - 4, end, 4);
3375                 }
3376                 memcpy(align_buf + align_start, data_buf, buf_size);
3377                 buf = align_buf;
3378         }
3379
3380         if (bp->flash_info->buffered == 0) {
3381                 flash_buffer = kmalloc(264, GFP_KERNEL);
3382                 if (flash_buffer == NULL) {
3383                         rc = -ENOMEM;
3384                         goto nvram_write_end;
3385                 }
3386         }
3387
3388         written = 0;
3389         while ((written < len32) && (rc == 0)) {
3390                 u32 page_start, page_end, data_start, data_end;
3391                 u32 addr, cmd_flags;
3392                 int i;
3393
3394                 /* Find the page_start addr */
3395                 page_start = offset32 + written;
3396                 page_start -= (page_start % bp->flash_info->page_size);
3397                 /* Find the page_end addr */
3398                 page_end = page_start + bp->flash_info->page_size;
3399                 /* Find the data_start addr */
3400                 data_start = (written == 0) ? offset32 : page_start;
3401                 /* Find the data_end addr */
3402                 data_end = (page_end > offset32 + len32) ?
3403                         (offset32 + len32) : page_end;
3404
3405                 /* Request access to the flash interface. */
3406                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3407                         goto nvram_write_end;
3408
3409                 /* Enable access to flash interface */
3410                 bnx2_enable_nvram_access(bp);
3411
3412                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3413                 if (bp->flash_info->buffered == 0) {
3414                         int j;
3415
3416                         /* Read the whole page into the buffer
3417                          * (non-buffer flash only) */
3418                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3419                                 if (j == (bp->flash_info->page_size - 4)) {
3420                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3421                                 }
3422                                 rc = bnx2_nvram_read_dword(bp,
3423                                         page_start + j,
3424                                         &flash_buffer[j],
3425                                         cmd_flags);
3426
3427                                 if (rc)
3428                                         goto nvram_write_end;
3429
3430                                 cmd_flags = 0;
3431                         }
3432                 }
3433
3434                 /* Enable writes to flash interface (unlock write-protect) */
3435                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3436                         goto nvram_write_end;
3437
3438                 /* Loop to write back the buffer data from page_start to
3439                  * data_start */
3440                 i = 0;
3441                 if (bp->flash_info->buffered == 0) {
3442                         /* Erase the page */
3443                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3444                                 goto nvram_write_end;
3445
3446                         /* Re-enable the write again for the actual write */
3447                         bnx2_enable_nvram_write(bp);
3448
3449                         for (addr = page_start; addr < data_start;
3450                                 addr += 4, i += 4) {
3451
3452                                 rc = bnx2_nvram_write_dword(bp, addr,
3453                                         &flash_buffer[i], cmd_flags);
3454
3455                                 if (rc != 0)
3456                                         goto nvram_write_end;
3457
3458                                 cmd_flags = 0;
3459                         }
3460                 }
3461
3462                 /* Loop to write the new data from data_start to data_end */
3463                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3464                         if ((addr == page_end - 4) ||
3465                                 ((bp->flash_info->buffered) &&
3466                                  (addr == data_end - 4))) {
3467
3468                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3469                         }
3470                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3471                                 cmd_flags);
3472
3473                         if (rc != 0)
3474                                 goto nvram_write_end;
3475
3476                         cmd_flags = 0;
3477                         buf += 4;
3478                 }
3479
3480                 /* Loop to write back the buffer data from data_end
3481                  * to page_end */
3482                 if (bp->flash_info->buffered == 0) {
3483                         for (addr = data_end; addr < page_end;
3484                                 addr += 4, i += 4) {
3485
3486                                 if (addr == page_end-4) {
3487                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3488                                 }
3489                                 rc = bnx2_nvram_write_dword(bp, addr,
3490                                         &flash_buffer[i], cmd_flags);
3491
3492                                 if (rc != 0)
3493                                         goto nvram_write_end;
3494
3495                                 cmd_flags = 0;
3496                         }
3497                 }
3498
3499                 /* Disable writes to flash interface (lock write-protect) */
3500                 bnx2_disable_nvram_write(bp);
3501
3502                 /* Disable access to flash interface */
3503                 bnx2_disable_nvram_access(bp);
3504                 bnx2_release_nvram_lock(bp);
3505
3506                 /* Increment written */
3507                 written += data_end - data_start;
3508         }
3509
3510 nvram_write_end:
3511         kfree(flash_buffer);
3512         kfree(align_buf);
3513         return rc;
3514 }
3515
3516 static int
3517 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3518 {
3519         u32 val;
3520         int i, rc = 0;
3521
3522         /* Wait for the current PCI transaction to complete before
3523          * issuing a reset. */
3524         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3525                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3526                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3527                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3528                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3529         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3530         udelay(5);
3531
3532         /* Wait for the firmware to tell us it is ok to issue a reset. */
3533         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3534
3535         /* Deposit a driver reset signature so the firmware knows that
3536          * this is a soft reset. */
3537         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3538                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3539
3540         /* Do a dummy read to force the chip to complete all current transaction
3541          * before we issue a reset. */
3542         val = REG_RD(bp, BNX2_MISC_ID);
3543
3544         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3545                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3546                 REG_RD(bp, BNX2_MISC_COMMAND);
3547                 udelay(5);
3548
3549                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3550                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3551
3552                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3553
3554         } else {
3555                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3556                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3557                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3558
3559                 /* Chip reset. */
3560                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3561
3562                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3563                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3564                         current->state = TASK_UNINTERRUPTIBLE;
3565                         schedule_timeout(HZ / 50);
3566                 }
3567
3568                 /* Reset takes approximate 30 usec */
3569                 for (i = 0; i < 10; i++) {
3570                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3571                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3572                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3573                                 break;
3574                         udelay(10);
3575                 }
3576
3577                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3578                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3579                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3580                         return -EBUSY;
3581                 }
3582         }
3583
3584         /* Make sure byte swapping is properly configured. */
3585         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3586         if (val != 0x01020304) {
3587                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3588                 return -ENODEV;
3589         }
3590
3591         /* Wait for the firmware to finish its initialization. */
3592         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3593         if (rc)
3594                 return rc;
3595
3596         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3597                 /* Adjust the voltage regular to two steps lower.  The default
3598                  * of this register is 0x0000000e. */
3599                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3600
3601                 /* Remove bad rbuf memory from the free pool. */
3602                 rc = bnx2_alloc_bad_rbuf(bp);
3603         }
3604
3605         return rc;
3606 }
3607
/* bnx2_init_chip - one-time hardware initialization performed after a
 * chip reset: DMA configuration, context memory, on-chip CPU firmware,
 * MQ/RV2P/TBDR setup, host coalescing parameters, and the receive
 * filter.  Returns 0 on success or a negative errno from CPU init or
 * the final firmware sync.  The statement order follows the hardware
 * bring-up sequence and must not be rearranged casually.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): the meaning of bits 20-21 and bit 11 is not
	 * visible here -- confirm against the DMA_CONFIG register spec. */
	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* Ping-pong DMA only on non-A0 5706 parts running plain PCI. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0: restrict TDMA to a single DMA engine. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the PCI-X "enable relaxed ordering" command bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RX/TX processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* MQ: set kernel bypass block size to 256; 5709 A0/A1 also need
	 * the halt-disable workaround bit. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel bypass mailbox window boundaries. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the BD page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Host coalescing: DMA addresses of the status and statistics
	 * blocks, split into low/high 32 bits. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Coalescing thresholds: interrupt values in the high 16 bits,
	 * normal values in the low 16 bits. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* 5706 A1 cannot use the timer-mode workarounds the other chips
	 * get; it only collects statistics. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Final firmware handshake, then enable (almost) all blocks. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for fast interrupt handling. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3775
/* bnx2_init_tx_context - program the L2 TX context for the given cid.
 *
 * The 5709 (Xinan) uses a different set of context offsets than the
 * older chips, but the values written are the same: the context
 * type/size, the command type word (the (8 << 16) subfield is not
 * documented here -- presumably a BD prefetch/watermark count; confirm
 * against the L2 context spec), and the 64-bit host DMA address of the
 * tx BD ring split into high/low halves.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* Xinan context offsets. */
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* High then low 32 bits of the tx BD ring DMA address. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
3804
/* bnx2_init_tx_ring - reset driver tx ring state and point the chip at
 * the tx BD ring.  The last BD slot in the ring page is written with
 * the ring's own DMA address, chaining the page back to its start so
 * the hardware sees a circular ring.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Threshold of free BDs used by the tx completion path (that
	 * path is not visible here) -- half the ring. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Chain BD: last entry points back to the start of the ring. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset producer/consumer indices and the byte sequence. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	cid = TX_CID;
	/* Mailbox addresses used to ring the tx doorbell (BD index and
	 * byte sequence). */
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3829
/* bnx2_init_rx_ring - initialize the rx BD ring pages, chain them into
 * a circle, program the rx L2 context, pre-fill the ring with skbs,
 * and publish the initial producer index/byte sequence to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	/* Reset producer/consumer state. */
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		/* Fill every usable BD in this page; the entry after
		 * the loop (index MAX_RX_DESC_CNT) is the chain BD. */
		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Chain the last page back to page 0, others to the
		 * next page. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the rx L2 context: BD-chain type/size.  The meaning
	 * of the (0x02 << 8) subfield is not visible here -- confirm
	 * against the L2 context spec. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* DMA address of the first BD page, high then low 32 bits. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with receive buffers; stop early if
	 * allocation fails (the ring simply starts partially filled). */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3889
3890 static void
3891 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3892 {
3893         u32 num_rings, max;
3894
3895         bp->rx_ring_size = size;
3896         num_rings = 1;
3897         while (size > MAX_RX_DESC_CNT) {
3898                 size -= MAX_RX_DESC_CNT;
3899                 num_rings++;
3900         }
3901         /* round to next power of 2 */
3902         max = MAX_RX_RINGS;
3903         while ((max & num_rings) == 0)
3904                 max >>= 1;
3905
3906         if (num_rings != max)
3907                 max <<= 1;
3908
3909         bp->rx_max_ring = max;
3910         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3911 }
3912
/* bnx2_free_tx_skbs - unmap and free every skb still sitting in the tx
 * ring.  Each skb occupies one BD for the linear head plus one BD per
 * page fragment, so the index advances by (nr_frags + 1) per skb.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot -- move to the next BD. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear portion of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from the following BDs. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past all BDs consumed by this skb. */
		i += j + 1;
	}

}
3949
/* bnx2_free_rx_skbs - unmap and free every skb still posted to the rx
 * ring.  Each rx buffer occupies exactly one BD, so this is a simple
 * linear sweep over all ring entries.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		/* Slot never filled (or already reclaimed). */
		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}
3973
/* bnx2_free_skbs - release every socket buffer still held by the tx
 * and rx rings (used when resetting or tearing down the NIC). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3980
/* bnx2_reset_nic - reset the chip and rebuild the tx/rx rings.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* reason passed through to the
 *              firmware by bnx2_reset_chip().
 *
 * The skbs are freed even when the chip reset fails -- the rings are
 * stale either way and the buffers must not leak.  Returns 0 or the
 * error from the reset/init path.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Free buffers unconditionally, before checking the reset result. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
3998
/* bnx2_init_nic - full NIC (re)initialization: reset the chip and
 * rings, then bring up the PHY under phy_lock and update the link
 * state.  Returns 0 or the error from the reset path.
 */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY access is serialized by phy_lock; bnx2_set_link() is
	 * called outside the lock. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	bnx2_set_link(bp);
	return 0;
}
4013
/* bnx2_test_registers - ethtool self-test of chip registers.
 *
 * For each table entry, write all-zeros and then all-ones to the
 * register and verify that (a) the read/write bits (rw_mask) take the
 * written value and (b) the read-only bits (ro_mask) keep their
 * original value.  The original register contents are restored in all
 * cases.  Entries flagged BNX2_FL_NOT_5709 are skipped on 5709 parts.
 *
 * Returns 0 if every register behaves, -ENODEV on the first failure.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table columns: register offset, chip-applicability flags,
	 * writable-bit mask, read-only-bit mask. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write zeros: rw bits must read back 0, ro bits must
		 * keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write ones: rw bits must read back 1, ro bits must
		 * still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4184
4185 static int
4186 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4187 {
4188         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4189                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4190         int i;
4191
4192         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4193                 u32 offset;
4194
4195                 for (offset = 0; offset < size; offset += 4) {
4196
4197                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4198
4199                         if (REG_RD_IND(bp, start + offset) !=
4200                                 test_pattern[i]) {
4201                                 return -ENODEV;
4202                         }
4203                 }
4204         }
4205         return 0;
4206 }
4207
4208 static int
4209 bnx2_test_memory(struct bnx2 *bp)
4210 {
4211         int ret = 0;
4212         int i;
4213         static struct mem_entry {
4214                 u32   offset;
4215                 u32   len;
4216         } mem_tbl_5706[] = {
4217                 { 0x60000,  0x4000 },
4218                 { 0xa0000,  0x3000 },
4219                 { 0xe0000,  0x4000 },
4220                 { 0x120000, 0x4000 },
4221                 { 0x1a0000, 0x4000 },
4222                 { 0x160000, 0x4000 },
4223                 { 0xffffffff, 0    },
4224         },
4225         mem_tbl_5709[] = {
4226                 { 0x60000,  0x4000 },
4227                 { 0xa0000,  0x3000 },
4228                 { 0xe0000,  0x4000 },
4229                 { 0x120000, 0x4000 },
4230                 { 0x1a0000, 0x4000 },
4231                 { 0xffffffff, 0    },
4232         };
4233         struct mem_entry *mem_tbl;
4234
4235         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4236                 mem_tbl = mem_tbl_5709;
4237         else
4238                 mem_tbl = mem_tbl_5706;
4239
4240         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4241                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4242                         mem_tbl[i].len)) != 0) {
4243                         return ret;
4244                 }
4245         }
4246
4247         return ret;
4248 }
4249
4250 #define BNX2_MAC_LOOPBACK       0
4251 #define BNX2_PHY_LOOPBACK       1
4252
/* Send one frame through the chip in MAC or PHY loopback mode and
 * verify it comes back intact on the receive ring.
 *
 * Used by the ethtool self test after bnx2_reset_nic(), with the
 * device quiesced.  Returns 0 if the frame is received and matches
 * what was sent, -EINVAL for an unknown loopback_mode, -ENOMEM if an
 * skb cannot be allocated, and -ENODEV on any TX/RX/content mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build a 1514-byte test frame: our MAC address, 8 bytes of
         * zero, then an incrementing byte pattern we can verify on
         * receive. */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force an immediate status block update (no interrupt) so the
         * RX consumer index sampled below is current. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Post a single TX buffer descriptor covering the whole frame. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell: producer index and byte sequence. */
        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        /* Give the chip time to loop the frame back, then force
         * another status block update before checking the indices. */
        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The frame must have been consumed by the TX engine... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ...and exactly num_pkts frames must have been received. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The chip-written l2_fhdr precedes the frame data in the RX
         * buffer; rx_offset skips past it. */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject the frame if the chip flagged any receive error. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Chip-reported length includes the 4-byte FCS. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern survived the round trip. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4371
4372 #define BNX2_MAC_LOOPBACK_FAILED        1
4373 #define BNX2_PHY_LOOPBACK_FAILED        2
4374 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4375                                          BNX2_PHY_LOOPBACK_FAILED)
4376
4377 static int
4378 bnx2_test_loopback(struct bnx2 *bp)
4379 {
4380         int rc = 0;
4381
4382         if (!netif_running(bp->dev))
4383                 return BNX2_LOOPBACK_FAILED;
4384
4385         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4386         spin_lock_bh(&bp->phy_lock);
4387         bnx2_init_phy(bp);
4388         spin_unlock_bh(&bp->phy_lock);
4389         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4390                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4391         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4392                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4393         return rc;
4394 }
4395
4396 #define NVRAM_SIZE 0x200
4397 #define CRC32_RESIDUAL 0xdebb20e3
4398
4399 static int
4400 bnx2_test_nvram(struct bnx2 *bp)
4401 {
4402         u32 buf[NVRAM_SIZE / 4];
4403         u8 *data = (u8 *) buf;
4404         int rc = 0;
4405         u32 magic, csum;
4406
4407         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4408                 goto test_nvram_done;
4409
4410         magic = be32_to_cpu(buf[0]);
4411         if (magic != 0x669955aa) {
4412                 rc = -ENODEV;
4413                 goto test_nvram_done;
4414         }
4415
4416         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4417                 goto test_nvram_done;
4418
4419         csum = ether_crc_le(0x100, data);
4420         if (csum != CRC32_RESIDUAL) {
4421                 rc = -ENODEV;
4422                 goto test_nvram_done;
4423         }
4424
4425         csum = ether_crc_le(0x100, data + 0x100);
4426         if (csum != CRC32_RESIDUAL) {
4427                 rc = -ENODEV;
4428         }
4429
4430 test_nvram_done:
4431         return rc;
4432 }
4433
4434 static int
4435 bnx2_test_link(struct bnx2 *bp)
4436 {
4437         u32 bmsr;
4438
4439         spin_lock_bh(&bp->phy_lock);
4440         bnx2_enable_bmsr1(bp);
4441         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4442         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4443         bnx2_disable_bmsr1(bp);
4444         spin_unlock_bh(&bp->phy_lock);
4445
4446         if (bmsr & BMSR_LSTATUS) {
4447                 return 0;
4448         }
4449         return -ENODEV;
4450 }
4451
4452 static int
4453 bnx2_test_intr(struct bnx2 *bp)
4454 {
4455         int i;
4456         u16 status_idx;
4457
4458         if (!netif_running(bp->dev))
4459                 return -ENODEV;
4460
4461         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4462
4463         /* This register is not touched during run-time. */
4464         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4465         REG_RD(bp, BNX2_HC_COMMAND);
4466
4467         for (i = 0; i < 10; i++) {
4468                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4469                         status_idx) {
4470
4471                         break;
4472                 }
4473
4474                 msleep_interruptible(10);
4475         }
4476         if (i < 10)
4477                 return 0;
4478
4479         return -ENODEV;
4480 }
4481
/* Periodic timer handler for the 5706 SerDes PHY.  Implements parallel
 * detection: if autoneg is enabled but the link has not come up and the
 * partner is not sending config words, force 1Gb full duplex; if a
 * parallel-detected link later starts seeing config words, re-enable
 * autoneg.  Runs from bnx2_timer() in softirq context; takes phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Hold off while a recent autoneg restart settles. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* NOTE(review): 0x1c/0x17/0x15 look like
                         * bank-select/shadow PHY registers — semantics
                         * per Broadcom PHY datasheet, confirm there. */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        /* Read twice; presumably the first read returns
                         * latched state and the second the current one. */
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner is not autonegotiating: force
                                 * 1Gb full duplex (parallel detect). */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Link is up via parallel detect; if the partner now
                 * sends config words, switch back to autoneg. */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4536
/* Periodic timer handler for the 5708 SerDes PHY.  While autoneg has
 * not brought the link up, alternate between forcing 2.5G and
 * re-enabling autoneg so link can be established with partners that do
 * not autonegotiate.  Only applies to 2.5G-capable PHYs.  Runs from
 * bnx2_timer(); takes phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Hold off while a recent autoneg restart settles. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg has not succeeded: try forced 2.5G. */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced mode did not link either: go back to
                         * autoneg and wait two timer ticks. */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4566
4567 static void
4568 bnx2_timer(unsigned long data)
4569 {
4570         struct bnx2 *bp = (struct bnx2 *) data;
4571         u32 msg;
4572
4573         if (!netif_running(bp->dev))
4574                 return;
4575
4576         if (atomic_read(&bp->intr_sem) != 0)
4577                 goto bnx2_restart_timer;
4578
4579         msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4580         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4581
4582         bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4583
4584         if (bp->phy_flags & PHY_SERDES_FLAG) {
4585                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4586                         bnx2_5706_serdes_timer(bp);
4587                 else
4588                         bnx2_5708_serdes_timer(bp);
4589         }
4590
4591 bnx2_restart_timer:
4592         mod_timer(&bp->timer, jiffies + bp->current_interval);
4593 }
4594
/* Called with rtnl_lock */
/* net_device open hook: power up the chip, allocate rings, request the
 * IRQ (MSI when usable, otherwise shared INTx), initialize the NIC,
 * verify MSI actually delivers interrupts (falling back to INTx if
 * not), and start the TX queue.  Returns 0 or a negative errno with
 * everything unwound.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        /* MSI is avoided on 5706 A0/A1 (NOTE(review): presumably a
         * chip erratum — confirm) and when the user set disable_msi. */
        if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
                (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
                !disable_msi) {

                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
                                        dev);
                }
                else {
                        rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                }
        }
        else {
                rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
                                dev->name, dev);
        }
        if (rc) {
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                /* Unwind IRQ, MSI, buffers, and ring memory. */
                free_irq(bp->pdev->irq, dev);
                if (bp->flags & USING_MSI_FLAG) {
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        free_irq(bp->pdev->irq, dev);
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;

                        /* Re-initialize the NIC and retry with a shared
                         * INTx handler. */
                        rc = bnx2_init_nic(bp);

                        if (!rc) {
                                rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                        }
                        if (rc) {
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
4692
4693 static void
4694 bnx2_reset_task(struct work_struct *work)
4695 {
4696         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4697
4698         if (!netif_running(bp->dev))
4699                 return;
4700
4701         bp->in_reset_task = 1;
4702         bnx2_netif_stop(bp);
4703
4704         bnx2_init_nic(bp);
4705
4706         atomic_set(&bp->intr_sem, 1);
4707         bnx2_netif_start(bp);
4708         bp->in_reset_task = 0;
4709 }
4710
4711 static void
4712 bnx2_tx_timeout(struct net_device *dev)
4713 {
4714         struct bnx2 *bp = netdev_priv(dev);
4715
4716         /* This allows the netif to be shutdown gracefully before resetting */
4717         schedule_work(&bp->reset_task);
4718 }
4719
4720 #ifdef BCM_VLAN
4721 /* Called with rtnl_lock */
4722 static void
4723 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4724 {
4725         struct bnx2 *bp = netdev_priv(dev);
4726
4727         bnx2_netif_stop(bp);
4728
4729         bp->vlgrp = vlgrp;
4730         bnx2_set_rx_mode(dev);
4731
4732         bnx2_netif_start(bp);
4733 }
4734
4735 /* Called with rtnl_lock */
4736 static void
4737 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4738 {
4739         struct bnx2 *bp = netdev_priv(dev);
4740
4741         bnx2_netif_stop(bp);
4742         vlan_group_set_device(bp->vlgrp, vid, NULL);
4743         bnx2_set_rx_mode(dev);
4744
4745         bnx2_netif_start(bp);
4746 }
4747 #endif
4748
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* hard_start_xmit hook: map the skb (linear part plus page fragments)
 * into a chain of TX buffer descriptors, set up checksum/VLAN/TSO
 * flags, and ring the doorbell.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;

        /* Should not happen: the queue is stopped before the ring gets
         * this full (see the check at the end of this function). */
        if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = bp->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* Offload the TCP/UDP checksum to the chip. */
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        /* TSO/LSO setup when gso_size is set and the skb actually
         * exceeds a single MTU frame. */
        if ((mss = skb_shinfo(skb)->gso_size) &&
                (skb->len > (bp->dev->mtu + ETH_HLEN))) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* TCPv6 LSO: encode the extra TCP header offset
                         * (beyond a plain IPv6 header) into the BD flag
                         * and mss fields. */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4 LSO: headers are modified below, so make
                         * sure they are not shared with a clone. */
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Prime the headers for segmentation: zero IP
                         * checksum, per-segment tot_len, pseudo-header
                         * TCP checksum. */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        /* Report IP option + TCP option lengths (in
                         * 32-bit words) to the chip. */
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* Map the linear part and fill the first BD. */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &bp->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &bp->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &bp->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD in the chain. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        bp->tx_prod_bseq += skb->len;

        /* Ring the doorbell: new producer index and byte sequence. */
        REG_WR16(bp, bp->tx_bidx_addr, prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        mmiowb();

        bp->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue when the ring can no longer fit a maximally
         * fragmented skb; re-check after stopping to close the race
         * against a concurrent bnx2_tx_int() completion. */
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
4888
/* Called with rtnl_lock */
/* net_device stop hook: wait out any in-flight reset task, quiesce the
 * interface, reset the chip with a Wake-on-LAN-appropriate firmware
 * code, release the IRQ/MSI, free all buffers and ring memory, and put
 * the chip into D3hot.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        /* Pick the firmware reset code according to the WoL setup. */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        free_irq(bp->pdev->irq, dev);
        if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
4924
/* Assemble a 64-bit hardware counter from its _hi/_lo halves.  Fully
 * parenthesized so the expansion is safe inside larger expressions
 * (e.g. when multiplied or compared).  Only used when unsigned long is
 * 64 bits wide (see the BITS_PER_LONG selection below).
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +  \
         (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low half of the counter fits. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4937
/* net_device get_stats hook: translate the chip's hardware statistics
 * block into the generic struct net_device_stats.  If the statistics
 * block has not been allocated yet, the (zeroed) cached stats are
 * returned unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        /* rx_errors is an aggregate of the categories filled above. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier-sense errors are reported as 0 on 5706 and 5708 A0;
         * NOTE(review): presumably the counter is unusable on those
         * chips — confirm against the chip errata. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Include frames dropped by the firmware (polled in
         * bnx2_timer()) in the missed count. */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
5013
5014 /* All ethtool functions called with rtnl_lock */
5015
5016 static int
5017 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5018 {
5019         struct bnx2 *bp = netdev_priv(dev);
5020
5021         cmd->supported = SUPPORTED_Autoneg;
5022         if (bp->phy_flags & PHY_SERDES_FLAG) {
5023                 cmd->supported |= SUPPORTED_1000baseT_Full |
5024                         SUPPORTED_FIBRE;
5025                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5026                         cmd->supported |= SUPPORTED_2500baseX_Full;
5027
5028                 cmd->port = PORT_FIBRE;
5029         }
5030         else {
5031                 cmd->supported |= SUPPORTED_10baseT_Half |
5032                         SUPPORTED_10baseT_Full |
5033                         SUPPORTED_100baseT_Half |
5034                         SUPPORTED_100baseT_Full |
5035                         SUPPORTED_1000baseT_Full |
5036                         SUPPORTED_TP;
5037
5038                 cmd->port = PORT_TP;
5039         }
5040
5041         cmd->advertising = bp->advertising;
5042
5043         if (bp->autoneg & AUTONEG_SPEED) {
5044                 cmd->autoneg = AUTONEG_ENABLE;
5045         }
5046         else {
5047                 cmd->autoneg = AUTONEG_DISABLE;
5048         }
5049
5050         if (netif_carrier_ok(dev)) {
5051                 cmd->speed = bp->line_speed;
5052                 cmd->duplex = bp->duplex;
5053         }
5054         else {
5055                 cmd->speed = -1;
5056                 cmd->duplex = -1;
5057         }
5058
5059         cmd->transceiver = XCVR_INTERNAL;
5060         cmd->phy_address = bp->phy_addr;
5061
5062         return 0;
5063 }
5064
/* ethtool ->set_settings: apply link parameters requested by userspace.
 * The autoneg/speed/duplex combination is validated against the media
 * type (copper vs. SerDes) before anything is committed to bp.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so bp is only updated after all checks pass. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 advertisement is rejected on SerDes media */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G advertisement requires a 2.5G-capable PHY */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000baseT half duplex advertisement is rejected */
			return -EINVAL;
		}
		else {
			/* No single-speed request: advertise every speed the
			 * media type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes supports only 1000/2500 full duplex */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forcing 1000 Mbps on copper is rejected */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Commit the validated settings and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5142
5143 static void
5144 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5145 {
5146         struct bnx2 *bp = netdev_priv(dev);
5147
5148         strcpy(info->driver, DRV_MODULE_NAME);
5149         strcpy(info->version, DRV_MODULE_VERSION);
5150         strcpy(info->bus_info, pci_name(bp->pdev));
5151         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5152         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5153         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5154         info->fw_version[1] = info->fw_version[3] = '.';
5155         info->fw_version[5] = 0;
5156 }
5157
5158 #define BNX2_REGDUMP_LEN                (32 * 1024)
5159
5160 static int
5161 bnx2_get_regs_len(struct net_device *dev)
5162 {
5163         return BNX2_REGDUMP_LEN;
5164 }
5165
5166 static void
5167 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5168 {
5169         u32 *p = _p, i, offset;
5170         u8 *orig_p = _p;
5171         struct bnx2 *bp = netdev_priv(dev);
5172         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5173                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5174                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5175                                  0x1040, 0x1048, 0x1080, 0x10a4,
5176                                  0x1400, 0x1490, 0x1498, 0x14f0,
5177                                  0x1500, 0x155c, 0x1580, 0x15dc,
5178                                  0x1600, 0x1658, 0x1680, 0x16d8,
5179                                  0x1800, 0x1820, 0x1840, 0x1854,
5180                                  0x1880, 0x1894, 0x1900, 0x1984,
5181                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5182                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5183                                  0x2000, 0x2030, 0x23c0, 0x2400,
5184                                  0x2800, 0x2820, 0x2830, 0x2850,
5185                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5186                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5187                                  0x4080, 0x4090, 0x43c0, 0x4458,
5188                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5189                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5190                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5191                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5192                                  0x6800, 0x6848, 0x684c, 0x6860,
5193                                  0x6888, 0x6910, 0x8000 };
5194
5195         regs->version = 0;
5196
5197         memset(p, 0, BNX2_REGDUMP_LEN);
5198
5199         if (!netif_running(bp->dev))
5200                 return;
5201
5202         i = 0;
5203         offset = reg_boundaries[0];
5204         p += offset;
5205         while (offset < BNX2_REGDUMP_LEN) {
5206                 *p++ = REG_RD(bp, offset);
5207                 offset += 4;
5208                 if (offset == reg_boundaries[i + 1]) {
5209                         offset = reg_boundaries[i + 2];
5210                         p = (u32 *) (orig_p + offset);
5211                         i += 2;
5212                 }
5213         }
5214 }
5215
5216 static void
5217 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5218 {
5219         struct bnx2 *bp = netdev_priv(dev);
5220
5221         if (bp->flags & NO_WOL_FLAG) {
5222                 wol->supported = 0;
5223                 wol->wolopts = 0;
5224         }
5225         else {
5226                 wol->supported = WAKE_MAGIC;
5227                 if (bp->wol)
5228                         wol->wolopts = WAKE_MAGIC;
5229                 else
5230                         wol->wolopts = 0;
5231         }
5232         memset(&wol->sopass, 0, sizeof(wol->sopass));
5233 }
5234
5235 static int
5236 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5237 {
5238         struct bnx2 *bp = netdev_priv(dev);
5239
5240         if (wol->wolopts & ~WAKE_MAGIC)
5241                 return -EINVAL;
5242
5243         if (wol->wolopts & WAKE_MAGIC) {
5244                 if (bp->flags & NO_WOL_FLAG)
5245                         return -EINVAL;
5246
5247                 bp->wol = 1;
5248         }
5249         else {
5250                 bp->wol = 0;
5251         }
5252         return 0;
5253 }
5254
/* ethtool ->nway_reset: restart autonegotiation.
 * Returns -EINVAL when the link is configured for a forced speed.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be called
		 * with a BH spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout serviced by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autoneg cycle. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5289
5290 static int
5291 bnx2_get_eeprom_len(struct net_device *dev)
5292 {
5293         struct bnx2 *bp = netdev_priv(dev);
5294
5295         if (bp->flash_info == NULL)
5296                 return 0;
5297
5298         return (int) bp->flash_size;
5299 }
5300
5301 static int
5302 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5303                 u8 *eebuf)
5304 {
5305         struct bnx2 *bp = netdev_priv(dev);
5306         int rc;
5307
5308         /* parameters already validated in ethtool_get_eeprom */
5309
5310         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5311
5312         return rc;
5313 }
5314
5315 static int
5316 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5317                 u8 *eebuf)
5318 {
5319         struct bnx2 *bp = netdev_priv(dev);
5320         int rc;
5321
5322         /* parameters already validated in ethtool_set_eeprom */
5323
5324         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5325
5326         return rc;
5327 }
5328
5329 static int
5330 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5331 {
5332         struct bnx2 *bp = netdev_priv(dev);
5333
5334         memset(coal, 0, sizeof(struct ethtool_coalesce));
5335
5336         coal->rx_coalesce_usecs = bp->rx_ticks;
5337         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5338         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5339         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5340
5341         coal->tx_coalesce_usecs = bp->tx_ticks;
5342         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5343         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5344         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5345
5346         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5347
5348         return 0;
5349 }
5350
5351 static int
5352 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5353 {
5354         struct bnx2 *bp = netdev_priv(dev);
5355
5356         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5357         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5358
5359         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5360         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5361
5362         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5363         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5364
5365         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5366         if (bp->rx_quick_cons_trip_int > 0xff)
5367                 bp->rx_quick_cons_trip_int = 0xff;
5368
5369         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5370         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5371
5372         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5373         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5374
5375         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5376         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5377
5378         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5379         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5380                 0xff;
5381
5382         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5383         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5384         bp->stats_ticks &= 0xffff00;
5385
5386         if (netif_running(bp->dev)) {
5387                 bnx2_netif_stop(bp);
5388                 bnx2_init_nic(bp);
5389                 bnx2_netif_start(bp);
5390         }
5391
5392         return 0;
5393 }
5394
5395 static void
5396 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5397 {
5398         struct bnx2 *bp = netdev_priv(dev);
5399
5400         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5401         ering->rx_mini_max_pending = 0;
5402         ering->rx_jumbo_max_pending = 0;
5403
5404         ering->rx_pending = bp->rx_ring_size;
5405         ering->rx_mini_pending = 0;
5406         ering->rx_jumbo_pending = 0;
5407
5408         ering->tx_max_pending = MAX_TX_DESC_CNT;
5409         ering->tx_pending = bp->tx_ring_size;
5410 }
5411
/* ethtool ->set_ringparam: resize the rx/tx descriptor rings.
 * tx_pending must exceed MAX_SKB_FRAGS so that a maximally fragmented
 * skb always fits in the tx ring.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		/* Quiesce the device and release the old rings. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with its rings freed and the caller only sees rc;
		 * recovery presumably requires closing the interface -
		 * verify against the caller's expectations.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5445
5446 static void
5447 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5448 {
5449         struct bnx2 *bp = netdev_priv(dev);
5450
5451         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5452         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5453         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5454 }
5455
5456 static int
5457 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5458 {
5459         struct bnx2 *bp = netdev_priv(dev);
5460
5461         bp->req_flow_ctrl = 0;
5462         if (epause->rx_pause)
5463                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5464         if (epause->tx_pause)
5465                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5466
5467         if (epause->autoneg) {
5468                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5469         }
5470         else {
5471                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5472         }
5473
5474         spin_lock_bh(&bp->phy_lock);
5475
5476         bnx2_setup_phy(bp);
5477
5478         spin_unlock_bh(&bp->phy_lock);
5479
5480         return 0;
5481 }
5482
5483 static u32
5484 bnx2_get_rx_csum(struct net_device *dev)
5485 {
5486         struct bnx2 *bp = netdev_priv(dev);
5487
5488         return bp->rx_csum;
5489 }
5490
5491 static int
5492 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5493 {
5494         struct bnx2 *bp = netdev_priv(dev);
5495
5496         bp->rx_csum = data;
5497         return 0;
5498 }
5499
5500 static int
5501 bnx2_set_tso(struct net_device *dev, u32 data)
5502 {
5503         struct bnx2 *bp = netdev_priv(dev);
5504
5505         if (data) {
5506                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5507                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5508                         dev->features |= NETIF_F_TSO6;
5509         } else
5510                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5511                                    NETIF_F_TSO_ECN);
5512         return 0;
5513 }
5514
#define BNX2_NUM_STATS 46

/* ethtool statistics names, returned for ETH_SS_STATS by
 * bnx2_get_strings().  Entry order must match bnx2_stats_offset_arr
 * and the per-chip length arrays.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5567
/* Offset of a counter within struct statistics_block, in u32 units. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Hardware location of each counter, in bnx2_stats_str_arr order.
 * The _hi entries address the high 32 bits of a 64-bit counter; the
 * low word is read from the following u32 in bnx2_get_ethtool_stats().
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5618
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Counter width per entry, in bnx2_stats_str_arr order:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = skip (report 0).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Same table for later chips; only entry 11 (carrier sense errors)
 * differs - it is valid here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5637
#define BNX2_NUM_TESTS 6

/* Self-test names returned for ETH_SS_TEST by bnx2_get_strings(); the
 * index of each entry matches the buf[] slot filled in bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5650
/* ethtool ->self_test_count: number of u64 results bnx2_self_test fills. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5656
/* ethtool ->self_test: run the diagnostics named in bnx2_tests_str_arr.
 * buf[i] is set nonzero when test i fails.  Offline tests reset the
 * chip into diag mode and therefore disrupt traffic; the online tests
 * run against the live device.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Take the NIC out of service and enter diag mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or just reset the chip when
		 * the interface was not up to begin with.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5712
5713 static void
5714 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5715 {
5716         switch (stringset) {
5717         case ETH_SS_STATS:
5718                 memcpy(buf, bnx2_stats_str_arr,
5719                         sizeof(bnx2_stats_str_arr));
5720                 break;
5721         case ETH_SS_TEST:
5722                 memcpy(buf, bnx2_tests_str_arr,
5723                         sizeof(bnx2_tests_str_arr));
5724                 break;
5725         }
5726 }
5727
/* ethtool ->get_stats_count: number of counters bnx2_get_ethtool_stats
 * reports.
 */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5733
5734 static void
5735 bnx2_get_ethtool_stats(struct net_device *dev,
5736                 struct ethtool_stats *stats, u64 *buf)
5737 {
5738         struct bnx2 *bp = netdev_priv(dev);
5739         int i;
5740         u32 *hw_stats = (u32 *) bp->stats_blk;
5741         u8 *stats_len_arr = NULL;
5742
5743         if (hw_stats == NULL) {
5744                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5745                 return;
5746         }
5747
5748         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5749             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5750             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5751             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5752                 stats_len_arr = bnx2_5706_stats_len_arr;
5753         else
5754                 stats_len_arr = bnx2_5708_stats_len_arr;
5755
5756         for (i = 0; i < BNX2_NUM_STATS; i++) {
5757                 if (stats_len_arr[i] == 0) {
5758                         /* skip this counter */
5759                         buf[i] = 0;
5760                         continue;
5761                 }
5762                 if (stats_len_arr[i] == 4) {
5763                         /* 4-byte counter */
5764                         buf[i] = (u64)
5765                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5766                         continue;
5767                 }
5768                 /* 8-byte counter */
5769                 buf[i] = (((u64) *(hw_stats +
5770                                         bnx2_stats_offset_arr[i])) << 32) +
5771                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5772         }
5773 }
5774
5775 static int
5776 bnx2_phys_id(struct net_device *dev, u32 data)
5777 {
5778         struct bnx2 *bp = netdev_priv(dev);
5779         int i;
5780         u32 save;
5781
5782         if (data == 0)
5783                 data = 2;
5784
5785         save = REG_RD(bp, BNX2_MISC_CFG);
5786         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5787
5788         for (i = 0; i < (data * 2); i++) {
5789                 if ((i % 2) == 0) {
5790                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5791                 }
5792                 else {
5793                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5794                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5795                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5796                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5797                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5798                                 BNX2_EMAC_LED_TRAFFIC);
5799                 }
5800                 msleep_interruptible(500);
5801                 if (signal_pending(current))
5802                         break;
5803         }
5804         REG_WR(bp, BNX2_EMAC_LED, 0);
5805         REG_WR(bp, BNX2_MISC_CFG, save);
5806         return 0;
5807 }
5808
5809 static int
5810 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5811 {
5812         struct bnx2 *bp = netdev_priv(dev);
5813
5814         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5815                 return (ethtool_op_set_tx_hw_csum(dev, data));
5816         else
5817                 return (ethtool_op_set_tx_csum(dev, data));
5818 }
5819
/* ethtool entry points for the bnx2 driver; generic ethtool_op_*
 * helpers are used where no chip-specific handling is needed.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings           = bnx2_get_settings,
	.set_settings           = bnx2_set_settings,
	.get_drvinfo            = bnx2_get_drvinfo,
	.get_regs_len           = bnx2_get_regs_len,
	.get_regs               = bnx2_get_regs,
	.get_wol                = bnx2_get_wol,
	.set_wol                = bnx2_set_wol,
	.nway_reset             = bnx2_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = bnx2_get_eeprom_len,
	.get_eeprom             = bnx2_get_eeprom,
	.set_eeprom             = bnx2_set_eeprom,
	.get_coalesce           = bnx2_get_coalesce,
	.set_coalesce           = bnx2_set_coalesce,
	.get_ringparam          = bnx2_get_ringparam,
	.set_ringparam          = bnx2_set_ringparam,
	.get_pauseparam         = bnx2_get_pauseparam,
	.set_pauseparam         = bnx2_set_pauseparam,
	.get_rx_csum            = bnx2_get_rx_csum,
	.set_rx_csum            = bnx2_set_rx_csum,
	.get_tx_csum            = ethtool_op_get_tx_csum,
	.set_tx_csum            = bnx2_set_tx_csum,
	.get_sg                 = ethtool_op_get_sg,
	.set_sg                 = ethtool_op_set_sg,
	.get_tso                = ethtool_op_get_tso,
	.set_tso                = bnx2_set_tso,
	.self_test_count        = bnx2_self_test_count,
	.self_test              = bnx2_self_test,
	.get_strings            = bnx2_get_strings,
	.phys_id                = bnx2_phys_id,
	.get_stats_count        = bnx2_get_stats_count,
	.get_ethtool_stats      = bnx2_get_ethtool_stats,
	.get_perm_addr          = ethtool_op_get_perm_addr,
};
5855
/* Called with rtnl_lock */
/* MII ioctl handler: SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG. */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* PHY access requires the device to be up. */
		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* PHY register writes are privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5903
5904 /* Called with rtnl_lock */
5905 static int
5906 bnx2_change_mac_addr(struct net_device *dev, void *p)
5907 {
5908         struct sockaddr *addr = p;
5909         struct bnx2 *bp = netdev_priv(dev);
5910
5911         if (!is_valid_ether_addr(addr->sa_data))
5912                 return -EINVAL;
5913
5914         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5915         if (netif_running(dev))
5916                 bnx2_set_mac_addr(bp);
5917
5918         return 0;
5919 }
5920
5921 /* Called with rtnl_lock */
5922 static int
5923 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5924 {
5925         struct bnx2 *bp = netdev_priv(dev);
5926
5927         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5928                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5929                 return -EINVAL;
5930
5931         dev->mtu = new_mtu;
5932         if (netif_running(dev)) {
5933                 bnx2_netif_stop(bp);
5934
5935                 bnx2_init_nic(bp);
5936
5937                 bnx2_netif_start(bp);
5938         }
5939         return 0;
5940 }
5941
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll controller hook: run the interrupt handler synchronously
 * with the device IRQ disabled.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Keep the hard interrupt from racing the synchronous call. */
	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5953
/* Probe-time detection of the 5709 media type.  The bond id identifies
 * single-media parts; dual-media parts are classified by the PHY strap
 * value, whose SerDes encodings differ between PCI function 0 and 1.
 * Sets PHY_SERDES_FLAG for SerDes ports, leaves flags alone for copper.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Single-media bond ids: _C leaves the default (copper), _S is
	 * SerDes.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Dual-media part: a software strap override takes precedence
	 * over the hardware strapping.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
5991
5992 static int __devinit
5993 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5994 {
5995         struct bnx2 *bp;
5996         unsigned long mem_len;
5997         int rc;
5998         u32 reg;
5999         u64 dma_mask, persist_dma_mask;
6000
6001         SET_MODULE_OWNER(dev);
6002         SET_NETDEV_DEV(dev, &pdev->dev);
6003         bp = netdev_priv(dev);
6004
6005         bp->flags = 0;
6006         bp->phy_flags = 0;
6007
6008         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6009         rc = pci_enable_device(pdev);
6010         if (rc) {
6011                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6012                 goto err_out;
6013         }
6014
6015         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6016                 dev_err(&pdev->dev,
6017                         "Cannot find PCI device base address, aborting.\n");
6018                 rc = -ENODEV;
6019                 goto err_out_disable;
6020         }
6021
6022         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6023         if (rc) {
6024                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6025                 goto err_out_disable;
6026         }
6027
6028         pci_set_master(pdev);
6029
6030         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6031         if (bp->pm_cap == 0) {
6032                 dev_err(&pdev->dev,
6033                         "Cannot find power management capability, aborting.\n");
6034                 rc = -EIO;
6035                 goto err_out_release;
6036         }
6037
6038         bp->dev = dev;
6039         bp->pdev = pdev;
6040
6041         spin_lock_init(&bp->phy_lock);
6042         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6043
6044         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6045         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6046         dev->mem_end = dev->mem_start + mem_len;
6047         dev->irq = pdev->irq;
6048
6049         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6050
6051         if (!bp->regview) {
6052                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6053                 rc = -ENOMEM;
6054                 goto err_out_release;
6055         }
6056
6057         /* Configure byte swap and enable write to the reg_window registers.
6058          * Rely on CPU to do target byte swapping on big endian systems
6059          * The chip's target access swapping will not swap all accesses
6060          */
6061         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6062                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6063                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6064
6065         bnx2_set_power_state(bp, PCI_D0);
6066
6067         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6068
6069         if (CHIP_NUM(bp) != CHIP_NUM_5709) {
6070                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6071                 if (bp->pcix_cap == 0) {
6072                         dev_err(&pdev->dev,
6073                                 "Cannot find PCIX capability, aborting.\n");
6074                         rc = -EIO;
6075                         goto err_out_unmap;
6076                 }
6077         }
6078
6079         /* 5708 cannot support DMA addresses > 40-bit.  */
6080         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6081                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6082         else
6083                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6084
6085         /* Configure DMA attributes. */
6086         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6087                 dev->features |= NETIF_F_HIGHDMA;
6088                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6089                 if (rc) {
6090                         dev_err(&pdev->dev,
6091                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6092                         goto err_out_unmap;
6093                 }
6094         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6095                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6096                 goto err_out_unmap;
6097         }
6098
6099         /* Get bus information. */
6100         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6101         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6102                 u32 clkreg;
6103
6104                 bp->flags |= PCIX_FLAG;
6105
6106                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6107
6108                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6109                 switch (clkreg) {
6110                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6111                         bp->bus_speed_mhz = 133;
6112                         break;
6113
6114                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6115                         bp->bus_speed_mhz = 100;
6116                         break;
6117
6118                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6119                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6120                         bp->bus_speed_mhz = 66;
6121                         break;
6122
6123                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6124                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6125                         bp->bus_speed_mhz = 50;
6126                         break;
6127
6128                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6129                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6130                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6131                         bp->bus_speed_mhz = 33;
6132                         break;
6133                 }
6134         }
6135         else {
6136                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6137                         bp->bus_speed_mhz = 66;
6138                 else
6139                         bp->bus_speed_mhz = 33;
6140         }
6141
6142         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6143                 bp->flags |= PCI_32BIT_FLAG;
6144
6145         /* 5706A0 may falsely detect SERR and PERR. */
6146         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6147                 reg = REG_RD(bp, PCI_COMMAND);
6148                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6149                 REG_WR(bp, PCI_COMMAND, reg);
6150         }
6151         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6152                 !(bp->flags & PCIX_FLAG)) {
6153
6154                 dev_err(&pdev->dev,
6155                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6156                 goto err_out_unmap;
6157         }
6158
6159         bnx2_init_nvram(bp);
6160
6161         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6162
6163         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6164             BNX2_SHM_HDR_SIGNATURE_SIG) {
6165                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6166
6167                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6168         } else
6169                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6170
6171         /* Get the permanent MAC address.  First we need to make sure the
6172          * firmware is actually running.
6173          */
6174         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6175
6176         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_