[BNX2]: Reduce spurious INTA interrupts.
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* prefix for printk log messages */
#define DRV_MODULE_VERSION      "1.5.11"
#define DRV_MODULE_RELDATE      "June 4, 2007"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Supported board variants; used as the driver_data index into
 * board_info[] below, so the two must stay in the same order.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };

/* PCI IDs this driver binds to.  HP OEM subsystem IDs must precede the
 * PCI_ANY_ID wildcard entries for the same device so they match first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
126
/* NVRAM device table.  Each entry describes one supported flash/EEPROM
 * part: the strapping match value, NVRAM config/command register values,
 * buffered-vs-unbuffered flag, page geometry, byte address mask, total
 * size, and a human-readable name.  See struct flash_spec in bnx2.h for
 * the exact field layout (field meanings above are inferred from the
 * initializer order -- confirm against the header).
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217 {
218         u32 diff;
219
220         smp_mb();
221
222         /* The ring uses 256 indices for 255 entries, one of them
223          * needs to be skipped.
224          */
225         diff = bp->tx_prod - bp->tx_cons;
226         if (unlikely(diff >= TX_DESC_CNT)) {
227                 diff &= 0xffff;
228                 if (diff == TX_DESC_CNT)
229                         diff = MAX_TX_DESC_CNT;
230         }
231         return (bp->tx_ring_size - diff);
232 }
233
/* Indirectly read a device register through the PCI config window:
 * write the target offset to the window address register, then read
 * the value back through the window data register.  indirect_lock
 * serializes the two-step sequence against other window users.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
245
/* Indirectly write @val to a device register through the PCI config
 * window.  Counterpart of bnx2_reg_rd_ind(); the same lock protects
 * the address-then-data sequence.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
254
255 static void
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257 {
258         offset += cid_addr;
259         spin_lock_bh(&bp->indirect_lock);
260         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261                 int i;
262
263                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266                 for (i = 0; i < 5; i++) {
267                         u32 val;
268                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270                                 break;
271                         udelay(5);
272                 }
273         } else {
274                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275                 REG_WR(bp, BNX2_CTX_DATA, val);
276         }
277         spin_unlock_bh(&bp->indirect_lock);
278 }
279
/* Read PHY register @reg over the MDIO interface into *val.
 *
 * Returns 0 on success, or -EBUSY (with *val zeroed) if the MDIO
 * transaction does not complete within the ~500us polling window.
 *
 * If hardware auto-polling of the PHY is enabled, it is turned off for
 * the duration of the manual access and restored afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Disable auto-poll; the readback and delay let the
		 * MDIO block settle before we issue our own command.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the read transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear, then fetch the
	 * data bits from the same register.
	 */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Timed out: the transaction never completed. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
336
/* Write @val to PHY register @reg over the MDIO interface.
 *
 * Returns 0 on success, or -EBUSY if the transaction does not complete
 * within the ~500us polling window.  Mirrors bnx2_read_phy(): hardware
 * auto-polling, when enabled, is suspended around the manual access.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Disable auto-poll while we drive the MDIO bus. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the write transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
385
/* Mask the device interrupt.  The readback flushes the posted PCI
 * write so the mask is in effect by the time this returns.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
393
/* Unmask the device interrupt.
 *
 * The first write acks events up to last_status_idx while keeping the
 * interrupt masked; the second repeats the ack with the mask bit clear,
 * actually enabling the line.  Doing the ack in two steps like this
 * presumably avoids a spurious INTA firing between ack and unmask --
 * confirm against the chip documentation.  The final COAL_NOW command
 * forces an immediate host-coalescing cycle so events that arrived
 * while masked are signalled without waiting for the next interval.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
406
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is raised first (the matching decrement is in
 * bnx2_netif_start()) so interrupt processing stays off until
 * explicitly re-enabled.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
414
/* Quiesce the interface: mask interrupts synchronously, then stop NAPI
 * polling and the tx queue.  Used around operations (reset, ring
 * reconfiguration) that must not race with packet processing.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
425
/* Undo bnx2_netif_stop().  Each call decrements intr_sem; the queue,
 * NAPI polling, and interrupts are only re-enabled when the count
 * reaches zero, i.e. when every outstanding stop has been matched.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
437
438 static void
439 bnx2_free_mem(struct bnx2 *bp)
440 {
441         int i;
442
443         for (i = 0; i < bp->ctx_pages; i++) {
444                 if (bp->ctx_blk[i]) {
445                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446                                             bp->ctx_blk[i],
447                                             bp->ctx_blk_mapping[i]);
448                         bp->ctx_blk[i] = NULL;
449                 }
450         }
451         if (bp->status_blk) {
452                 pci_free_consistent(bp->pdev, bp->status_stats_size,
453                                     bp->status_blk, bp->status_blk_mapping);
454                 bp->status_blk = NULL;
455                 bp->stats_blk = NULL;
456         }
457         if (bp->tx_desc_ring) {
458                 pci_free_consistent(bp->pdev,
459                                     sizeof(struct tx_bd) * TX_DESC_CNT,
460                                     bp->tx_desc_ring, bp->tx_desc_mapping);
461                 bp->tx_desc_ring = NULL;
462         }
463         kfree(bp->tx_buf_ring);
464         bp->tx_buf_ring = NULL;
465         for (i = 0; i < bp->rx_max_ring; i++) {
466                 if (bp->rx_desc_ring[i])
467                         pci_free_consistent(bp->pdev,
468                                             sizeof(struct rx_bd) * RX_DESC_CNT,
469                                             bp->rx_desc_ring[i],
470                                             bp->rx_desc_mapping[i]);
471                 bp->rx_desc_ring[i] = NULL;
472         }
473         vfree(bp->rx_buf_ring);
474         bp->rx_buf_ring = NULL;
475 }
476
/* Allocate all rings and DMA blocks needed by the device: tx shadow and
 * descriptor rings, rx shadow and descriptor rings, the combined
 * status/statistics block, and (5709 only) the context pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	/* Driver-side shadow ring for tx skb bookkeeping. */
	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	/* Hardware tx descriptor ring (DMA coherent). */
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx shadow ring can be large (rx_max_ring pages of entries),
	 * so use vmalloc rather than kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	/* One hardware rx descriptor ring per page. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats_blk sits right after the (cache-aligned) status block
	 * within the same DMA allocation; derive both CPU and bus
	 * addresses from the status block's.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB of context memory, split into BCM_PAGE_SIZE pages
		 * (at least one page even if BCM_PAGE_SIZE > 8KB).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
547
548 static void
549 bnx2_report_fw_link(struct bnx2 *bp)
550 {
551         u32 fw_link_status = 0;
552
553         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
554                 return;
555
556         if (bp->link_up) {
557                 u32 bmsr;
558
559                 switch (bp->line_speed) {
560                 case SPEED_10:
561                         if (bp->duplex == DUPLEX_HALF)
562                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
563                         else
564                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
565                         break;
566                 case SPEED_100:
567                         if (bp->duplex == DUPLEX_HALF)
568                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
569                         else
570                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
571                         break;
572                 case SPEED_1000:
573                         if (bp->duplex == DUPLEX_HALF)
574                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
575                         else
576                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
577                         break;
578                 case SPEED_2500:
579                         if (bp->duplex == DUPLEX_HALF)
580                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
581                         else
582                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
583                         break;
584                 }
585
586                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
587
588                 if (bp->autoneg) {
589                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
590
591                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
592                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
593
594                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
595                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
596                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
597                         else
598                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
599                 }
600         }
601         else
602                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
603
604         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
605 }
606
607 static char *
608 bnx2_xceiver_str(struct bnx2 *bp)
609 {
610         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
612                  "Copper"));
613 }
614
615 static void
616 bnx2_report_link(struct bnx2 *bp)
617 {
618         if (bp->link_up) {
619                 netif_carrier_on(bp->dev);
620                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
621                        bnx2_xceiver_str(bp));
622
623                 printk("%d Mbps ", bp->line_speed);
624
625                 if (bp->duplex == DUPLEX_FULL)
626                         printk("full duplex");
627                 else
628                         printk("half duplex");
629
630                 if (bp->flow_ctrl) {
631                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
632                                 printk(", receive ");
633                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
634                                         printk("& transmit ");
635                         }
636                         else {
637                                 printk(", transmit ");
638                         }
639                         printk("flow control ON");
640                 }
641                 printk("\n");
642         }
643         else {
644                 netif_carrier_off(bp->dev);
645                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
646                        bnx2_xceiver_str(bp));
647         }
648
649         bnx2_report_fw_link(bp);
650 }
651
652 static void
653 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
654 {
655         u32 local_adv, remote_adv;
656
657         bp->flow_ctrl = 0;
658         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
659                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
660
661                 if (bp->duplex == DUPLEX_FULL) {
662                         bp->flow_ctrl = bp->req_flow_ctrl;
663                 }
664                 return;
665         }
666
667         if (bp->duplex != DUPLEX_FULL) {
668                 return;
669         }
670
671         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
672             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
673                 u32 val;
674
675                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
676                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
677                         bp->flow_ctrl |= FLOW_CTRL_TX;
678                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
679                         bp->flow_ctrl |= FLOW_CTRL_RX;
680                 return;
681         }
682
683         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
684         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
685
686         if (bp->phy_flags & PHY_SERDES_FLAG) {
687                 u32 new_local_adv = 0;
688                 u32 new_remote_adv = 0;
689
690                 if (local_adv & ADVERTISE_1000XPAUSE)
691                         new_local_adv |= ADVERTISE_PAUSE_CAP;
692                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
693                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
694                 if (remote_adv & ADVERTISE_1000XPAUSE)
695                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
696                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
697                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
698
699                 local_adv = new_local_adv;
700                 remote_adv = new_remote_adv;
701         }
702
703         /* See Table 28B-3 of 802.3ab-1999 spec. */
704         if (local_adv & ADVERTISE_PAUSE_CAP) {
705                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
706                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
707                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
708                         }
709                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
710                                 bp->flow_ctrl = FLOW_CTRL_RX;
711                         }
712                 }
713                 else {
714                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
715                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
716                         }
717                 }
718         }
719         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
720                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
721                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
722
723                         bp->flow_ctrl = FLOW_CTRL_TX;
724                 }
725         }
726 }
727
728 static int
729 bnx2_5709s_linkup(struct bnx2 *bp)
730 {
731         u32 val, speed;
732
733         bp->link_up = 1;
734
735         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
736         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
737         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
738
739         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
740                 bp->line_speed = bp->req_line_speed;
741                 bp->duplex = bp->req_duplex;
742                 return 0;
743         }
744         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
745         switch (speed) {
746                 case MII_BNX2_GP_TOP_AN_SPEED_10:
747                         bp->line_speed = SPEED_10;
748                         break;
749                 case MII_BNX2_GP_TOP_AN_SPEED_100:
750                         bp->line_speed = SPEED_100;
751                         break;
752                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
753                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
754                         bp->line_speed = SPEED_1000;
755                         break;
756                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
757                         bp->line_speed = SPEED_2500;
758                         break;
759         }
760         if (val & MII_BNX2_GP_TOP_AN_FD)
761                 bp->duplex = DUPLEX_FULL;
762         else
763                 bp->duplex = DUPLEX_HALF;
764         return 0;
765 }
766
767 static int
768 bnx2_5708s_linkup(struct bnx2 *bp)
769 {
770         u32 val;
771
772         bp->link_up = 1;
773         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
774         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
775                 case BCM5708S_1000X_STAT1_SPEED_10:
776                         bp->line_speed = SPEED_10;
777                         break;
778                 case BCM5708S_1000X_STAT1_SPEED_100:
779                         bp->line_speed = SPEED_100;
780                         break;
781                 case BCM5708S_1000X_STAT1_SPEED_1G:
782                         bp->line_speed = SPEED_1000;
783                         break;
784                 case BCM5708S_1000X_STAT1_SPEED_2G5:
785                         bp->line_speed = SPEED_2500;
786                         break;
787         }
788         if (val & BCM5708S_1000X_STAT1_FD)
789                 bp->duplex = DUPLEX_FULL;
790         else
791                 bp->duplex = DUPLEX_HALF;
792
793         return 0;
794 }
795
796 static int
797 bnx2_5706s_linkup(struct bnx2 *bp)
798 {
799         u32 bmcr, local_adv, remote_adv, common;
800
801         bp->link_up = 1;
802         bp->line_speed = SPEED_1000;
803
804         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
805         if (bmcr & BMCR_FULLDPLX) {
806                 bp->duplex = DUPLEX_FULL;
807         }
808         else {
809                 bp->duplex = DUPLEX_HALF;
810         }
811
812         if (!(bmcr & BMCR_ANENABLE)) {
813                 return 0;
814         }
815
816         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
817         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
818
819         common = local_adv & remote_adv;
820         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
821
822                 if (common & ADVERTISE_1000XFULL) {
823                         bp->duplex = DUPLEX_FULL;
824                 }
825                 else {
826                         bp->duplex = DUPLEX_HALF;
827                 }
828         }
829
830         return 0;
831 }
832
833 static int
834 bnx2_copper_linkup(struct bnx2 *bp)
835 {
836         u32 bmcr;
837
838         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839         if (bmcr & BMCR_ANENABLE) {
840                 u32 local_adv, remote_adv, common;
841
842                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
843                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
844
845                 common = local_adv & (remote_adv >> 2);
846                 if (common & ADVERTISE_1000FULL) {
847                         bp->line_speed = SPEED_1000;
848                         bp->duplex = DUPLEX_FULL;
849                 }
850                 else if (common & ADVERTISE_1000HALF) {
851                         bp->line_speed = SPEED_1000;
852                         bp->duplex = DUPLEX_HALF;
853                 }
854                 else {
855                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
856                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
857
858                         common = local_adv & remote_adv;
859                         if (common & ADVERTISE_100FULL) {
860                                 bp->line_speed = SPEED_100;
861                                 bp->duplex = DUPLEX_FULL;
862                         }
863                         else if (common & ADVERTISE_100HALF) {
864                                 bp->line_speed = SPEED_100;
865                                 bp->duplex = DUPLEX_HALF;
866                         }
867                         else if (common & ADVERTISE_10FULL) {
868                                 bp->line_speed = SPEED_10;
869                                 bp->duplex = DUPLEX_FULL;
870                         }
871                         else if (common & ADVERTISE_10HALF) {
872                                 bp->line_speed = SPEED_10;
873                                 bp->duplex = DUPLEX_HALF;
874                         }
875                         else {
876                                 bp->line_speed = 0;
877                                 bp->link_up = 0;
878                         }
879                 }
880         }
881         else {
882                 if (bmcr & BMCR_SPEED100) {
883                         bp->line_speed = SPEED_100;
884                 }
885                 else {
886                         bp->line_speed = SPEED_10;
887                 }
888                 if (bmcr & BMCR_FULLDPLX) {
889                         bp->duplex = DUPLEX_FULL;
890                 }
891                 else {
892                         bp->duplex = DUPLEX_HALF;
893                 }
894         }
895
896         return 0;
897 }
898
/* Program the EMAC to match the current link state (speed, duplex,
 * rx/tx pause) recorded in *bp, then acknowledge the EMAC link-change
 * interrupt.  Returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap / slot time; 1000 Mbps half duplex
	 * needs the larger value written below.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	/* Clear all mode bits that are recomputed below. */
	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips other than the 5706 have a
				 * dedicated 10M MII port mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII port mode plus the 25G bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
965
966 static void
967 bnx2_enable_bmsr1(struct bnx2 *bp)
968 {
969         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
970             (CHIP_NUM(bp) == CHIP_NUM_5709))
971                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
972                                MII_BNX2_BLK_ADDR_GP_STATUS);
973 }
974
975 static void
976 bnx2_disable_bmsr1(struct bnx2 *bp)
977 {
978         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
979             (CHIP_NUM(bp) == CHIP_NUM_5709))
980                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
981                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
982 }
983
984 static int
985 bnx2_test_and_enable_2g5(struct bnx2 *bp)
986 {
987         u32 up1;
988         int ret = 1;
989
990         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
991                 return 0;
992
993         if (bp->autoneg & AUTONEG_SPEED)
994                 bp->advertising |= ADVERTISED_2500baseX_Full;
995
996         if (CHIP_NUM(bp) == CHIP_NUM_5709)
997                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
998
999         bnx2_read_phy(bp, bp->mii_up1, &up1);
1000         if (!(up1 & BCM5708S_UP1_2G5)) {
1001                 up1 |= BCM5708S_UP1_2G5;
1002                 bnx2_write_phy(bp, bp->mii_up1, up1);
1003                 ret = 0;
1004         }
1005
1006         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1007                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1008                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1009
1010         return ret;
1011 }
1012
1013 static int
1014 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1015 {
1016         u32 up1;
1017         int ret = 0;
1018
1019         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1020                 return 0;
1021
1022         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1024
1025         bnx2_read_phy(bp, bp->mii_up1, &up1);
1026         if (up1 & BCM5708S_UP1_2G5) {
1027                 up1 &= ~BCM5708S_UP1_2G5;
1028                 bnx2_write_phy(bp, bp->mii_up1, up1);
1029                 ret = 1;
1030         }
1031
1032         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1033                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1034                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1035
1036         return ret;
1037 }
1038
1039 static void
1040 bnx2_enable_forced_2g5(struct bnx2 *bp)
1041 {
1042         u32 bmcr;
1043
1044         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1045                 return;
1046
1047         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1048                 u32 val;
1049
1050                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1051                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1052                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1053                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1054                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1055                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1056
1057                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1058                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1059                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1060
1061         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1062                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1063                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1064         }
1065
1066         if (bp->autoneg & AUTONEG_SPEED) {
1067                 bmcr &= ~BMCR_ANENABLE;
1068                 if (bp->req_duplex == DUPLEX_FULL)
1069                         bmcr |= BMCR_FULLDPLX;
1070         }
1071         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1072 }
1073
1074 static void
1075 bnx2_disable_forced_2g5(struct bnx2 *bp)
1076 {
1077         u32 bmcr;
1078
1079         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1080                 return;
1081
1082         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1083                 u32 val;
1084
1085                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1086                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1087                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1088                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1089                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1098         }
1099
1100         if (bp->autoneg & AUTONEG_SPEED)
1101                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1102         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1103 }
1104
/* Poll the PHY for the current link state, update bp->link_up /
 * line_speed / duplex / flow_ctrl, report link changes, and reprogram
 * the MAC to match.  Returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is reported up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remotely-managed PHYs are updated via firmware events
	 * (bnx2_remote_phy_event) instead of direct polling.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	/* Remember the previous state so we only report real changes. */
	link_up = bp->link_up;

	/* Read the status register twice -- the link-status bit is
	 * latched, so the second read reflects the current state.  On
	 * 5709 SerDes the register lives in another block, hence the
	 * enable/disable bracketing.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, let the EMAC's own link indication override
	 * the PHY's BMSR link status.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down; drop any forced 2.5G setting so autoneg
		 * can start cleanly when the link returns.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	/* Always resync the MAC with the (possibly unchanged) state. */
	bnx2_set_mac_link(bp);

	return 0;
}
1170
1171 static int
1172 bnx2_reset_phy(struct bnx2 *bp)
1173 {
1174         int i;
1175         u32 reg;
1176
1177         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1178
1179 #define PHY_RESET_MAX_WAIT 100
1180         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1181                 udelay(10);
1182
1183                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1184                 if (!(reg & BMCR_RESET)) {
1185                         udelay(20);
1186                         break;
1187                 }
1188         }
1189         if (i == PHY_RESET_MAX_WAIT) {
1190                 return -EBUSY;
1191         }
1192         return 0;
1193 }
1194
1195 static u32
1196 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1197 {
1198         u32 adv = 0;
1199
1200         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1201                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1202
1203                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204                         adv = ADVERTISE_1000XPAUSE;
1205                 }
1206                 else {
1207                         adv = ADVERTISE_PAUSE_CAP;
1208                 }
1209         }
1210         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1211                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1212                         adv = ADVERTISE_1000XPSE_ASYM;
1213                 }
1214                 else {
1215                         adv = ADVERTISE_PAUSE_ASYM;
1216                 }
1217         }
1218         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1219                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1220                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1221                 }
1222                 else {
1223                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1224                 }
1225         }
1226         return adv;
1227 }
1228
1229 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1230
1231 static int
1232 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1233 {
1234         u32 speed_arg = 0, pause_adv;
1235
1236         pause_adv = bnx2_phy_get_pause_adv(bp);
1237
1238         if (bp->autoneg & AUTONEG_SPEED) {
1239                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240                 if (bp->advertising & ADVERTISED_10baseT_Half)
1241                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242                 if (bp->advertising & ADVERTISED_10baseT_Full)
1243                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244                 if (bp->advertising & ADVERTISED_100baseT_Half)
1245                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246                 if (bp->advertising & ADVERTISED_100baseT_Full)
1247                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1252         } else {
1253                 if (bp->req_line_speed == SPEED_2500)
1254                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255                 else if (bp->req_line_speed == SPEED_1000)
1256                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257                 else if (bp->req_line_speed == SPEED_100) {
1258                         if (bp->req_duplex == DUPLEX_FULL)
1259                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1260                         else
1261                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262                 } else if (bp->req_line_speed == SPEED_10) {
1263                         if (bp->req_duplex == DUPLEX_FULL)
1264                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1265                         else
1266                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1267                 }
1268         }
1269
1270         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1273                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1274
1275         if (port == PORT_TP)
1276                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1278
1279         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1280
1281         spin_unlock_bh(&bp->phy_lock);
1282         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283         spin_lock_bh(&bp->phy_lock);
1284
1285         return 0;
1286 }
1287
/* Configure a SerDes PHY according to bp->autoneg and the requested
 * speed/duplex.  Handles both forced-speed mode and autonegotiation,
 * forcing a visible link-down when settings change so the partner
 * renegotiates.  Called with bp->phy_lock held (dropped briefly
 * around the msleep below).  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Remotely-managed PHYs are configured through firmware. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G advertisement on/off requires a
		 * link bounce so the change takes effect.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is the
				 * BMCR_SPEED100 bit position; presumably
				 * it participates in the 5709's forced
				 * speed selection -- confirm against the
				 * chip documentation.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just resync flow control
			 * and the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		/* Advertisement already correct and autoneg enabled. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1402
1403 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1404         (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
1405                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1406                 (ADVERTISED_1000baseT_Full)
1407
1408 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1409         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1410         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1411         ADVERTISED_1000baseT_Full)
1412
1413 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1414         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1415
1416 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1417
1418 static void
1419 bnx2_set_default_remote_link(struct bnx2 *bp)
1420 {
1421         u32 link;
1422
1423         if (bp->phy_port == PORT_TP)
1424                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1425         else
1426                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1427
1428         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1429                 bp->req_line_speed = 0;
1430                 bp->autoneg |= AUTONEG_SPEED;
1431                 bp->advertising = ADVERTISED_Autoneg;
1432                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1433                         bp->advertising |= ADVERTISED_10baseT_Half;
1434                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1435                         bp->advertising |= ADVERTISED_10baseT_Full;
1436                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1437                         bp->advertising |= ADVERTISED_100baseT_Half;
1438                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1439                         bp->advertising |= ADVERTISED_100baseT_Full;
1440                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1441                         bp->advertising |= ADVERTISED_1000baseT_Full;
1442                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1443                         bp->advertising |= ADVERTISED_2500baseX_Full;
1444         } else {
1445                 bp->autoneg = 0;
1446                 bp->advertising = 0;
1447                 bp->req_duplex = DUPLEX_FULL;
1448                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1449                         bp->req_line_speed = SPEED_10;
1450                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1451                                 bp->req_duplex = DUPLEX_HALF;
1452                 }
1453                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1454                         bp->req_line_speed = SPEED_100;
1455                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1456                                 bp->req_duplex = DUPLEX_HALF;
1457                 }
1458                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1459                         bp->req_line_speed = SPEED_1000;
1460                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1461                         bp->req_line_speed = SPEED_2500;
1462         }
1463 }
1464
1465 static void
1466 bnx2_set_default_link(struct bnx2 *bp)
1467 {
1468         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1469                 return bnx2_set_default_remote_link(bp);
1470
1471         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1472         bp->req_line_speed = 0;
1473         if (bp->phy_flags & PHY_SERDES_FLAG) {
1474                 u32 reg;
1475
1476                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1477
1478                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1479                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1480                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1481                         bp->autoneg = 0;
1482                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1483                         bp->req_duplex = DUPLEX_FULL;
1484                 }
1485         } else
1486                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1487 }
1488
/* Handle a link-status change reported by firmware for a
 * remotely-managed PHY: decode the shared-memory link word into
 * bp->link_up / line_speed / duplex / flow_ctrl / phy_port, report
 * any link change, and resync the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, for change detection */
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case overrides the full-duplex default and
		 * then deliberately falls through to the matching speed
		 * assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Unless both speed and flow control are autonegotiated,
		 * use the locally requested flow control (full duplex
		 * only); otherwise take the firmware-negotiated result.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Reload defaults if the firmware switched the port
		 * between serdes and copper.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1562
1563 static int
1564 bnx2_set_remote_link(struct bnx2 *bp)
1565 {
1566         u32 evt_code;
1567
1568         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1569         switch (evt_code) {
1570                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1571                         bnx2_remote_phy_event(bp);
1572                         break;
1573                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1574                 default:
1575                         break;
1576         }
1577         return 0;
1578 }
1579
/* Program a copper PHY according to the requested settings in
 * bp->autoneg, bp->advertising, bp->req_line_speed and bp->req_duplex.
 * Called with bp->phy_lock held; the lock is dropped briefly while
 * waiting for a forced link-down to take effect.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement, keeping only the speed and
		 * pause bits for comparison against the desired value.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only rewrite the advertisement and restart autoneg if
		 * something actually changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex mode: build the desired BMCR value. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read it twice to get
		 * the current state rather than a stale latched value.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop phy_lock while sleeping; msleep() may not be
			 * called with a BH-disabling spinlock held.
			 */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		/* BMCR already matches; just refresh flow control and MAC. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1676
1677 static int
1678 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1679 {
1680         if (bp->loopback == MAC_LOOPBACK)
1681                 return 0;
1682
1683         if (bp->phy_flags & PHY_SERDES_FLAG) {
1684                 return (bnx2_setup_serdes_phy(bp, port));
1685         }
1686         else {
1687                 return (bnx2_setup_copper_phy(bp));
1688         }
1689 }
1690
/* One-time initialization of the 5709 SerDes PHY.  Programs the block
 * addressing scheme, forces fiber mode, and sets up 2.5G advertisement
 * and next-page/CL73 autoneg support.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* On the 5709 SerDes the standard IEEE MII registers are located
	 * at an offset of 0x10; point the cached register numbers there.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable clause 73 BAM autoneg support. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block so the
	 * remapped MII register offsets above remain valid.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1739
/* One-time initialization of the 5708 SerDes PHY: fiber mode, PLL
 * detection, optional 2.5G advertisement and chip-revision-specific
 * TX amplitude tuning.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE register semantics in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel (PLL) link detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared memory; applied only
	 * on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1797
/* One-time initialization of the 5706 SerDes PHY.  Adjusts packet
 * length handling depending on whether jumbo frames (mtu > 1500) are
 * in use.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	/* NOTE(review): registers 0x18 and 0x1c appear to be the Broadcom
	 * auxiliary-control / shadow register pair — confirm against the
	 * PHY data sheet before changing any of the magic values below.
	 */
	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1834
/* One-time initialization of a copper PHY: applies errata workarounds
 * selected by phy_flags, adjusts extended packet length for jumbo
 * frames, and enables ethernet@wirespeed.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC errata workaround: a fixed sequence of writes to the
	 * expansion registers.  NOTE(review): values come from vendor
	 * errata; do not modify without the PHY data sheet.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clear bit 8 of DSP expansion register 8). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1885
1886
1887 static int
1888 bnx2_init_phy(struct bnx2 *bp)
1889 {
1890         u32 val;
1891         int rc = 0;
1892
1893         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1894         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1895
1896         bp->mii_bmcr = MII_BMCR;
1897         bp->mii_bmsr = MII_BMSR;
1898         bp->mii_bmsr1 = MII_BMSR;
1899         bp->mii_adv = MII_ADVERTISE;
1900         bp->mii_lpa = MII_LPA;
1901
1902         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1903
1904         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1905                 goto setup_phy;
1906
1907         bnx2_read_phy(bp, MII_PHYSID1, &val);
1908         bp->phy_id = val << 16;
1909         bnx2_read_phy(bp, MII_PHYSID2, &val);
1910         bp->phy_id |= val & 0xffff;
1911
1912         if (bp->phy_flags & PHY_SERDES_FLAG) {
1913                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1914                         rc = bnx2_init_5706s_phy(bp);
1915                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1916                         rc = bnx2_init_5708s_phy(bp);
1917                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1918                         rc = bnx2_init_5709s_phy(bp);
1919         }
1920         else {
1921                 rc = bnx2_init_copper_phy(bp);
1922         }
1923
1924 setup_phy:
1925         if (!rc)
1926                 rc = bnx2_setup_phy(bp, bp->phy_port);
1927
1928         return rc;
1929 }
1930
1931 static int
1932 bnx2_set_mac_loopback(struct bnx2 *bp)
1933 {
1934         u32 mac_mode;
1935
1936         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1937         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1938         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1939         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1940         bp->link_up = 1;
1941         return 0;
1942 }
1943
1944 static int bnx2_test_link(struct bnx2 *);
1945
1946 static int
1947 bnx2_set_phy_loopback(struct bnx2 *bp)
1948 {
1949         u32 mac_mode;
1950         int rc, i;
1951
1952         spin_lock_bh(&bp->phy_lock);
1953         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1954                             BMCR_SPEED1000);
1955         spin_unlock_bh(&bp->phy_lock);
1956         if (rc)
1957                 return rc;
1958
1959         for (i = 0; i < 10; i++) {
1960                 if (bnx2_test_link(bp) == 0)
1961                         break;
1962                 msleep(100);
1963         }
1964
1965         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1966         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1967                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1968                       BNX2_EMAC_MODE_25G_MODE);
1969
1970         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1971         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1972         bp->link_up = 1;
1973         return 0;
1974 }
1975
/* Post a message to the bootcode firmware through the shared-memory
 * driver mailbox and wait up to FW_ACK_TIME_OUT_MS for the firmware to
 * acknowledge the sequence number.
 *
 * @msg_data: message code/data; the low sequence bits are filled in here.
 * @silent:   suppress the timeout printk when non-zero.
 *
 * Returns 0 on success (or for WAIT0 messages, which do not check
 * completion status), -EBUSY on ack timeout, -EIO on a firmware error
 * status.  Sleeps; must be called from process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number so the ack can
	 * be matched unambiguously.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages succeed regardless of ack/status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	/* Firmware acked but reported a failure status. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2018
/* Initialize the 5709 context memory: trigger the hardware memory init
 * and then program the host page table with the DMA addresses of the
 * pre-allocated context pages in bp->ctx_blk_mapping[].
 *
 * Returns 0 on success, -EBUSY if the memory init or a page-table
 * write request does not complete within the polling window.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size (relative to 256 bytes) in bits 16+. */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page DMA address plus the valid bit. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		/* High 32 bits of the page DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		/* Kick off the write of page-table entry i. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to complete. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2061
/* Zero-initialize the on-chip context memory for all 96 connection
 * IDs on pre-5709 chips (the 5709 uses host memory instead, see
 * bnx2_init_5709_context()).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* NOTE(review): the 5706 A0 remaps part of the vcid
			 * space (vcids with bit 3 set land at 0x60 + ...),
			 * presumably a chip errata workaround — confirm
			 * against the 5706 A0 errata before touching.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context may span several physical pages; map and clear
		 * each PHY_CTX_SIZE chunk in turn.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			/* Re-establish the virtual->physical mapping. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2107
/* Work around bad RX buffer memory blocks: allocate every free mbuf
 * from the chip, keep the good ones in a temporary array, and free
 * only those back — permanently leaking the bad blocks out of the
 * chip's free pool.  Returns 0 on success, -ENOMEM if the temporary
 * array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): 512 entries is presumably the chip's total mbuf
	 * count — confirm no overflow is possible against the RBUF specs.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encoding expected by the FW_BUF_FREE register. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2158
2159 static void
2160 bnx2_set_mac_addr(struct bnx2 *bp)
2161 {
2162         u32 val;
2163         u8 *mac_addr = bp->dev->dev_addr;
2164
2165         val = (mac_addr[0] << 8) | mac_addr[1];
2166
2167         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2168
2169         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2170                 (mac_addr[4] << 8) | mac_addr[5];
2171
2172         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2173 }
2174
/* Allocate a new RX skb, DMA-map it, and install it in RX ring slot
 * @index, advancing rx_prod_bseq.  Returns 0 on success, -ENOMEM if
 * the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	/* NOTE(review): the mapping is not checked for failure here
	 * (no pci_dma_mapping_error call) — verify whether this is safe
	 * on all supported platforms.
	 */
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Publish the buffer's DMA address in the ring descriptor. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2205
2206 static int
2207 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2208 {
2209         struct status_block *sblk = bp->status_blk;
2210         u32 new_link_state, old_link_state;
2211         int is_set = 1;
2212
2213         new_link_state = sblk->status_attn_bits & event;
2214         old_link_state = sblk->status_attn_bits_ack & event;
2215         if (new_link_state != old_link_state) {
2216                 if (new_link_state)
2217                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2218                 else
2219                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2220         } else
2221                 is_set = 0;
2222
2223         return is_set;
2224 }
2225
2226 static void
2227 bnx2_phy_int(struct bnx2 *bp)
2228 {
2229         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2230                 spin_lock(&bp->phy_lock);
2231                 bnx2_set_link(bp);
2232                 spin_unlock(&bp->phy_lock);
2233         }
2234         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2235                 bnx2_set_remote_link(bp);
2236
2237 }
2238
/* Reclaim completed TX descriptors up to the hardware consumer index
 * from the status block: unmap each packet's DMA buffers, free the
 * skbs, and wake the TX queue if enough ring space became available.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last ring entry is a link BD skipped by the chip; skip it
	 * in the consumer index too.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's final BD; only reclaim once
			 * the hardware has consumed past it.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap-around. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part of the packet. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each paged fragment, advancing the consumer. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware consumer index to pick up any
		 * completions that arrived while we were working.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough descriptors freed;
	 * re-check under netif_tx_lock to avoid racing with start_xmit.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2326
/* Recycle the RX buffer at ring slot @cons into slot @prod without
 * allocating a new skb: move the skb pointer, DMA mapping and BD
 * address from the consumer slot to the producer slot.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give ownership of the (header portion of the) buffer back to
	 * the device after the CPU looked at it.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing else to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the buffer's DMA address into the producer descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2356
/* NAPI RX handler: process up to @budget received packets from the RX
 * ring, passing good frames up the stack and recycling buffers for
 * errored or unallocatable ones.  Returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last ring entry is a link BD skipped by the chip; skip it
	 * in the consumer index too.
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Make the frame header visible to the CPU; only the first
		 * rx_offset + RX_COPY_THRESH bytes are needed to classify.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status header to the frame. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Length excludes the 4-byte FCS. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		/* Recycle the buffer for any frame with receive errors. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* The original buffer stays on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer was installed; hand this skb
			 * to the stack after unmapping it.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: recycle the
			 * buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN tagged
		 * (0x8100 = ETH_P_8021Q), which adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when RX csum offload is
		 * enabled and no TCP/UDP checksum error was flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return rx_pkt;

}
2504
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Ack the interrupt and mask further ones; the NAPI poll
	 * routine re-enables them when all work is done.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2527
2528 static irqreturn_t
2529 bnx2_msi_1shot(int irq, void *dev_instance)
2530 {
2531         struct net_device *dev = dev_instance;
2532         struct bnx2 *bp = netdev_priv(dev);
2533
2534         prefetch(bp->status_blk);
2535
2536         /* Return here if interrupt is disabled. */
2537         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2538                 return IRQ_HANDLED;
2539
2540         netif_rx_schedule(dev);
2541
2542         return IRQ_HANDLED;
2543 }
2544
/* INTx (possibly shared) interrupt handler.  Unlike the MSI handlers,
 * this must first decide whether the interrupt really came from this
 * device before acking it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack the interrupt and mask further ones until NAPI poll
	 * finishes.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we have seen so the next INTA can be
	 * recognized as new work, then schedule NAPI polling.
	 */
	if (netif_rx_schedule_prep(dev)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev);
	}

	return IRQ_HANDLED;
}
2583
2584 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2585                                  STATUS_ATTN_BITS_TIMER_ABORT)
2586
2587 static inline int
2588 bnx2_has_work(struct bnx2 *bp)
2589 {
2590         struct status_block *sblk = bp->status_blk;
2591
2592         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2593             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2594                 return 1;
2595
2596         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2597             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2598                 return 1;
2599
2600         return 0;
2601 }
2602
/* NAPI poll handler.  Services PHY attention events and TX/RX
 * completions, then either re-enables interrupts and returns 0 (all
 * work done) or returns 1 to be polled again.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Attention bit differing from its ack bit => pending event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the device quota in one pass. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* With MSI, a single ack with the latest status index
		 * re-enables interrupts.
		 */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* With INTx, write twice: first update the index with
		 * interrupts still masked, then unmask.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2663
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN-tag stripping) and the
 * multicast hash filter from dev->flags and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits this function owns
	 * (promiscuous, keep-VLAN-tag) cleared; they are recomputed below.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags on received frames unless a vlan group is
	 * registered or ASF is enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the MC hash. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low byte of the little-endian CRC:
			 * bits 7:5 select the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort user0 rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2738
2739 #define FW_BUF_SIZE     0x8000
2740
2741 static int
2742 bnx2_gunzip_init(struct bnx2 *bp)
2743 {
2744         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2745                 goto gunzip_nomem1;
2746
2747         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2748                 goto gunzip_nomem2;
2749
2750         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2751         if (bp->strm->workspace == NULL)
2752                 goto gunzip_nomem3;
2753
2754         return 0;
2755
2756 gunzip_nomem3:
2757         kfree(bp->strm);
2758         bp->strm = NULL;
2759
2760 gunzip_nomem2:
2761         vfree(bp->gunzip_buf);
2762         bp->gunzip_buf = NULL;
2763
2764 gunzip_nomem1:
2765         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2766                             "uncompression.\n", bp->dev->name);
2767         return -ENOMEM;
2768 }
2769
2770 static void
2771 bnx2_gunzip_end(struct bnx2 *bp)
2772 {
2773         kfree(bp->strm->workspace);
2774
2775         kfree(bp->strm);
2776         bp->strm = NULL;
2777
2778         if (bp->gunzip_buf) {
2779                 vfree(bp->gunzip_buf);
2780                 bp->gunzip_buf = NULL;
2781         }
2782 }
2783
2784 static int
2785 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2786 {
2787         int n, rc;
2788
2789         /* check gzip header */
2790         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2791                 return -EINVAL;
2792
2793         n = 10;
2794
2795 #define FNAME   0x8
2796         if (zbuf[3] & FNAME)
2797                 while ((zbuf[n++] != 0) && (n < len));
2798
2799         bp->strm->next_in = zbuf + n;
2800         bp->strm->avail_in = len - n;
2801         bp->strm->next_out = bp->gunzip_buf;
2802         bp->strm->avail_out = FW_BUF_SIZE;
2803
2804         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2805         if (rc != Z_OK)
2806                 return rc;
2807
2808         rc = zlib_inflate(bp->strm, Z_FINISH);
2809
2810         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2811         *outbuf = bp->gunzip_buf;
2812
2813         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2814                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2815                        bp->dev->name, bp->strm->msg);
2816
2817         zlib_inflateEnd(bp->strm);
2818
2819         if (rc == Z_STREAM_END)
2820                 return 0;
2821
2822         return rc;
2823 }
2824
/* Load firmware into one of the RV2P processors (PROC1 or PROC2).
 * Each 64-bit instruction is written through the INSTR_HIGH/INSTR_LOW
 * register pair, then committed to instruction slot i/8 with an
 * address/command write.  The processor is finally held in reset;
 * un-stalling happens later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2857
2858 static int
2859 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2860 {
2861         u32 offset;
2862         u32 val;
2863         int rc;
2864
2865         /* Halt the CPU. */
2866         val = REG_RD_IND(bp, cpu_reg->mode);
2867         val |= cpu_reg->mode_value_halt;
2868         REG_WR_IND(bp, cpu_reg->mode, val);
2869         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2870
2871         /* Load the Text area. */
2872         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2873         if (fw->gz_text) {
2874                 u32 text_len;
2875                 void *text;
2876
2877                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2878                                  &text_len);
2879                 if (rc)
2880                         return rc;
2881
2882                 fw->text = text;
2883         }
2884         if (fw->gz_text) {
2885                 int j;
2886
2887                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2888                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2889                 }
2890         }
2891
2892         /* Load the Data area. */
2893         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2894         if (fw->data) {
2895                 int j;
2896
2897                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2898                         REG_WR_IND(bp, offset, fw->data[j]);
2899                 }
2900         }
2901
2902         /* Load the SBSS area. */
2903         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2904         if (fw->sbss) {
2905                 int j;
2906
2907                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2908                         REG_WR_IND(bp, offset, fw->sbss[j]);
2909                 }
2910         }
2911
2912         /* Load the BSS area. */
2913         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2914         if (fw->bss) {
2915                 int j;
2916
2917                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2918                         REG_WR_IND(bp, offset, fw->bss[j]);
2919                 }
2920         }
2921
2922         /* Load the Read-Only area. */
2923         offset = cpu_reg->spad_base +
2924                 (fw->rodata_addr - cpu_reg->mips_view_base);
2925         if (fw->rodata) {
2926                 int j;
2927
2928                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2929                         REG_WR_IND(bp, offset, fw->rodata[j]);
2930                 }
2931         }
2932
2933         /* Clear the pre-fetch instruction. */
2934         REG_WR_IND(bp, cpu_reg->inst, 0);
2935         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2936
2937         /* Start the CPU. */
2938         val = REG_RD_IND(bp, cpu_reg->mode);
2939         val &= ~cpu_reg->mode_value_halt;
2940         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2941         REG_WR_IND(bp, cpu_reg->mode, val);
2942
2943         return 0;
2944 }
2945
/* Load firmware into all on-chip processors: the two RV2P engines and
 * the RX, TX, TX patch-up, completion and (5709 only) command CPUs.
 * The 5709 parts use a different firmware set than the 5706/5708.
 * Returns 0 on success, or the first error encountered.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Decompression state shared by all gzip'd firmware images. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor.  Only the 5709 has CP
	 * firmware to load; earlier chips skip this step.
	 */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3090
/* Transition the device to the requested PCI power state.
 *
 * PCI_D0: clear PME status, undo the EMAC/RPM settings made for
 * suspend.  PCI_D3hot: optionally arm Wake-on-LAN (force a 10/100
 * copper link and enable magic/ACPI packet reception per the
 * MPKT/ACPI register bit names), notify the firmware, then write the
 * D3hot state into PMCSR.  Returns 0 on success, -EINVAL for
 * unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power state field and write 1 to clear any
		 * latched PME status.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for WOL;
			 * the original settings are restored below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Disable, program, then enable sort user0. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): 5706 A0/A1 are left in D0 unless WOL is
		 * enabled (3 == D3hot in the PMCSR state field) --
		 * presumably a chip erratum; confirm against errata list.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3217
3218 static int
3219 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3220 {
3221         u32 val;
3222         int j;
3223
3224         /* Request access to the flash interface. */
3225         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3226         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3227                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3228                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3229                         break;
3230
3231                 udelay(5);
3232         }
3233
3234         if (j >= NVRAM_TIMEOUT_COUNT)
3235                 return -EBUSY;
3236
3237         return 0;
3238 }
3239
3240 static int
3241 bnx2_release_nvram_lock(struct bnx2 *bp)
3242 {
3243         int j;
3244         u32 val;
3245
3246         /* Relinquish nvram interface. */
3247         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3248
3249         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3250                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3251                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3252                         break;
3253
3254                 udelay(5);
3255         }
3256
3257         if (j >= NVRAM_TIMEOUT_COUNT)
3258                 return -EBUSY;
3259
3260         return 0;
3261 }
3262
3263
/* Enable writes to the flash.  For unbuffered parts, additionally
 * issue a WREN command to the device and wait for completion.
 * Returns 0 on success, -EBUSY if the command does not complete.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
3292
3293 static void
3294 bnx2_disable_nvram_write(struct bnx2 *bp)
3295 {
3296         u32 val;
3297
3298         val = REG_RD(bp, BNX2_MISC_CFG);
3299         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3300 }
3301
3302
3303 static void
3304 bnx2_enable_nvram_access(struct bnx2 *bp)
3305 {
3306         u32 val;
3307
3308         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3309         /* Enable both bits, even on read. */
3310         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3311                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3312 }
3313
3314 static void
3315 bnx2_disable_nvram_access(struct bnx2 *bp)
3316 {
3317         u32 val;
3318
3319         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3320         /* Disable both bits, even after read. */
3321         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3322                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3323                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3324 }
3325
/* Erase the flash page containing @offset.  Buffered flash parts need
 * no explicit erase, so this is a no-op for them.  Returns 0 on
 * success, -EBUSY if the erase does not complete within the timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3365
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes).
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transfers.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate an offset of a buffered flash.  Buffered parts are
         * addressed as (page number << page_bits) + offset in page. */
        if (bp->flash_info->buffered) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        val = REG_RD(bp, BNX2_NVM_READ);

                        /* Flash data is big-endian; byte-swap so the
                         * memcpy below yields bytes in flash order. */
                        val = be32_to_cpu(val);
                        memcpy(ret_val, &val, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
3411
3412
3413 static int
3414 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3415 {
3416         u32 cmd, val32;
3417         int j;
3418
3419         /* Build the command word. */
3420         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3421
3422         /* Calculate an offset of a buffered flash. */
3423         if (bp->flash_info->buffered) {
3424                 offset = ((offset / bp->flash_info->page_size) <<
3425                           bp->flash_info->page_bits) +
3426                          (offset % bp->flash_info->page_size);
3427         }
3428
3429         /* Need to clear DONE bit separately. */
3430         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3431
3432         memcpy(&val32, val, 4);
3433         val32 = cpu_to_be32(val32);
3434
3435         /* Write the data. */
3436         REG_WR(bp, BNX2_NVM_WRITE, val32);
3437
3438         /* Address of the NVRAM to write to. */
3439         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3440
3441         /* Issue the write command. */
3442         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3443
3444         /* Wait for completion. */
3445         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3446                 udelay(5);
3447
3448                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3449                         break;
3450         }
3451         if (j >= NVRAM_TIMEOUT_COUNT)
3452                 return -EBUSY;
3453
3454         return 0;
3455 }
3456
/* Identify the attached flash/EEPROM part from the NVM_CFG1 strapping
 * and point bp->flash_info at the matching flash_table entry.  If the
 * flash interface has not been reconfigured yet, reprogram the NVM
 * config registers from the table entry.  Also determines flash size.
 * Returns 0 on success, -ENODEV for an unknown part, or an error from
 * acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc;
        struct flash_spec *flash;

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

        rc = 0;
        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        /* Match on the backup-strap bits of config1. */
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                /* Bit 23 selects which strap set to compare against. */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                        j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        /* Neither loop matched an entry. */
        if (j == entry_count) {
                bp->flash_info = NULL;
                printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
                return -ENODEV;
        }

        /* Prefer the NVM size reported by shared firmware config; fall
         * back to the table entry's total_size when it is zero. */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
3534
3535 static int
3536 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3537                 int buf_size)
3538 {
3539         int rc = 0;
3540         u32 cmd_flags, offset32, len32, extra;
3541
3542         if (buf_size == 0)
3543                 return 0;
3544
3545         /* Request access to the flash interface. */
3546         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3547                 return rc;
3548
3549         /* Enable access to flash interface */
3550         bnx2_enable_nvram_access(bp);
3551
3552         len32 = buf_size;
3553         offset32 = offset;
3554         extra = 0;
3555
3556         cmd_flags = 0;
3557
3558         if (offset32 & 3) {
3559                 u8 buf[4];
3560                 u32 pre_len;
3561
3562                 offset32 &= ~3;
3563                 pre_len = 4 - (offset & 3);
3564
3565                 if (pre_len >= len32) {
3566                         pre_len = len32;
3567                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3568                                     BNX2_NVM_COMMAND_LAST;
3569                 }
3570                 else {
3571                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3572                 }
3573
3574                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3575
3576                 if (rc)
3577                         return rc;
3578
3579                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3580
3581                 offset32 += 4;
3582                 ret_buf += pre_len;
3583                 len32 -= pre_len;
3584         }
3585         if (len32 & 3) {
3586                 extra = 4 - (len32 & 3);
3587                 len32 = (len32 + 4) & ~3;
3588         }
3589
3590         if (len32 == 4) {
3591                 u8 buf[4];
3592
3593                 if (cmd_flags)
3594                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3595                 else
3596                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3597                                     BNX2_NVM_COMMAND_LAST;
3598
3599                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3600
3601                 memcpy(ret_buf, buf, 4 - extra);
3602         }
3603         else if (len32 > 0) {
3604                 u8 buf[4];
3605
3606                 /* Read the first word. */
3607                 if (cmd_flags)
3608                         cmd_flags = 0;
3609                 else
3610                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3611
3612                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3613
3614                 /* Advance to the next dword. */
3615                 offset32 += 4;
3616                 ret_buf += 4;
3617                 len32 -= 4;
3618
3619                 while (len32 > 4 && rc == 0) {
3620                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3621
3622                         /* Advance to the next dword. */
3623                         offset32 += 4;
3624                         ret_buf += 4;
3625                         len32 -= 4;
3626                 }
3627
3628                 if (rc)
3629                         return rc;
3630
3631                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3632                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3633
3634                 memcpy(ret_buf, buf, 4 - extra);
3635         }
3636
3637         /* Disable access to flash interface */
3638         bnx2_disable_nvram_access(bp);
3639
3640         bnx2_release_nvram_lock(bp);
3641
3642         return rc;
3643 }
3644
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.  Unaligned
 * head/tail bytes are merged with data read back from the flash, and
 * non-buffered parts are rewritten one full page at a time: read the
 * page, erase it, then write it back with the new bytes spliced in.
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): error paths that jump to nvram_write_end from inside
 * the per-page loop leave the NVRAM lock held and flash access enabled
 * — verify whether a release belongs on those paths.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        /* Unaligned start: round down and read the existing dword so
         * the leading bytes can be preserved. */
        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        /* Unaligned end: read the existing trailing dword likewise. */
        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        if (align_start || align_end) {
                /* Build a dword-aligned copy: preserved head bytes, the
                 * new data, then preserved tail bytes. */
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        if (bp->flash_info->buffered == 0) {
                /* Scratch buffer holding one full page while it is
                 * erased and rewritten (non-buffered flash only). */
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (bp->flash_info->buffered == 0) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (bp->flash_info->buffered == 0) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        /* Mark the last word of the page (or of the
                         * transfer, for buffered flash). */
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->buffered) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (bp->flash_info->buffered == 0) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
3824
3825 static void
3826 bnx2_init_remote_phy(struct bnx2 *bp)
3827 {
3828         u32 val;
3829
3830         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3831         if (!(bp->phy_flags & PHY_SERDES_FLAG))
3832                 return;
3833
3834         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3835         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3836                 return;
3837
3838         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3839                 if (netif_running(bp->dev)) {
3840                         val = BNX2_DRV_ACK_CAP_SIGNATURE |
3841                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3842                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3843                                    val);
3844                 }
3845                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3846
3847                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3848                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3849                         bp->phy_port = PORT_FIBRE;
3850                 else
3851                         bp->phy_port = PORT_TP;
3852         }
3853 }
3854
3855 static int
3856 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3857 {
3858         u32 val;
3859         int i, rc = 0;
3860
3861         /* Wait for the current PCI transaction to complete before
3862          * issuing a reset. */
3863         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3864                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3865                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3866                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3867                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3868         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3869         udelay(5);
3870
3871         /* Wait for the firmware to tell us it is ok to issue a reset. */
3872         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3873
3874         /* Deposit a driver reset signature so the firmware knows that
3875          * this is a soft reset. */
3876         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3877                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3878
3879         /* Do a dummy read to force the chip to complete all current transaction
3880          * before we issue a reset. */
3881         val = REG_RD(bp, BNX2_MISC_ID);
3882
3883         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3884                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3885                 REG_RD(bp, BNX2_MISC_COMMAND);
3886                 udelay(5);
3887
3888                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3889                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3890
3891                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3892
3893         } else {
3894                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3895                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3896                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3897
3898                 /* Chip reset. */
3899                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3900
3901                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3902                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3903                         current->state = TASK_UNINTERRUPTIBLE;
3904                         schedule_timeout(HZ / 50);
3905                 }
3906
3907                 /* Reset takes approximate 30 usec */
3908                 for (i = 0; i < 10; i++) {
3909                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3910                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3911                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3912                                 break;
3913                         udelay(10);
3914                 }
3915
3916                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3917                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3918                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3919                         return -EBUSY;
3920                 }
3921         }
3922
3923         /* Make sure byte swapping is properly configured. */
3924         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3925         if (val != 0x01020304) {
3926                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3927                 return -ENODEV;
3928         }
3929
3930         /* Wait for the firmware to finish its initialization. */
3931         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3932         if (rc)
3933                 return rc;
3934
3935         spin_lock_bh(&bp->phy_lock);
3936         bnx2_init_remote_phy(bp);
3937         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3938                 bnx2_set_default_remote_link(bp);
3939         spin_unlock_bh(&bp->phy_lock);
3940
3941         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3942                 /* Adjust the voltage regular to two steps lower.  The default
3943                  * of this register is 0x0000000e. */
3944                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3945
3946                 /* Remove bad rbuf memory from the free pool. */
3947                 rc = bnx2_alloc_bad_rbuf(bp);
3948         }
3949
3950         return rc;
3951 }
3952
/* Bring the chip from post-reset state to fully initialized: program
 * DMA config, load the on-chip CPUs' firmware, set up context memory,
 * the MQ kernel-bypass window, host-coalescing parameters, and the
 * status/statistics block DMA addresses, then hand WAIT2 to the
 * firmware and enable the chip's functional blocks.  Returns 0 on
 * success or a negative error code.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val;
        int rc;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Byte/word swap settings plus DMA read/write channel counts. */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        /* NOTE(review): magic DMA config bits — meaning per chip
         * documentation; confirm before changing. */
        val |= (0x2 << 20) | (1 << 11);

        if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        if (bp->flags & PCIX_FLAG) {
                u16 val16;

                /* Clear relaxed-ordering on PCI-X. */
                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rc = bnx2_init_5709_context(bp);
                if (rc)
                        return rc;
        } else
                bnx2_init_context(bp);

        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp);

        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
                val |= BNX2_MQ_CONFIG_HALT_DIS;

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        /* Kernel-bypass mailbox window bounds. */
        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the backoff generator from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        val = bp->dev->mtu + ETH_HLEN + 4;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        bp->last_status_idx = 0;
        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* DMA addresses of the status and statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host-coalescing thresholds and timers (int values in the
         * high halfword). */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        if (CHIP_NUM(bp) == CHIP_NUM_5708)
                REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
        else
                REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                val = BNX2_HC_CONFIG_COLLECT_STATS;
        else {
                val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
                      BNX2_HC_CONFIG_COLLECT_STATS;
        }

        if (bp->flags & ONE_SHOT_MSI_FLAG)
                val |= BNX2_HC_CONFIG_ONE_SHOT;

        REG_WR(bp, BNX2_HC_CONFIG, val);

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

        if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
            BNX2_PORT_FEATURE_ASF_ENABLED)
                bp->flags |= ASF_ENABLE_FLAG;

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
        }
        /* Tell the firmware initialization is done. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          0);

        /* Enable the chip's functional blocks. */
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
4134
4135 static void
4136 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4137 {
4138         u32 val, offset0, offset1, offset2, offset3;
4139
4140         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4141                 offset0 = BNX2_L2CTX_TYPE_XI;
4142                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4143                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4144                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4145         } else {
4146                 offset0 = BNX2_L2CTX_TYPE;
4147                 offset1 = BNX2_L2CTX_CMD_TYPE;
4148                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4149                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4150         }
4151         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4152         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4153
4154         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4155         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4156
4157         val = (u64) bp->tx_desc_mapping >> 32;
4158         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4159
4160         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4161         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4162 }
4163
/* Reset all TX ring software state, chain the last BD back to the
 * start of the ring, cache the doorbell mailbox addresses, and program
 * the TX context for TX_CID.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
        struct tx_bd *txbd;
        u32 cid;

        /* Wake the queue once at least half the ring has drained. */
        bp->tx_wake_thresh = bp->tx_ring_size / 2;

        /* The last descriptor slot is the chain BD: it points back to
         * the physical base of the ring, making the ring circular.
         */
        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

        /* Producer/consumer indices and byte sequence start at zero. */
        bp->tx_prod = 0;
        bp->tx_cons = 0;
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;

        cid = TX_CID;
        /* Cache the per-CID mailbox addresses used to ring the TX
         * doorbell (producer index and byte sequence).
         */
        bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
        bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

        bnx2_init_tx_context(bp, cid);
}
4188
/* Initialize the RX BD pages, chain them into a ring, program the RX
 * context, pre-fill the ring with receive skbs, and publish the initial
 * producer index/sequence to the hardware mailbox.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        struct rx_bd *rxbd;
        int i;
        u16 prod, ring_prod;
        u32 val;

        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
        /* hw alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

        /* prod is the global producer index; ring_prod is its position
         * within the current BD page.
         */
        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->hw_rx_cons = 0;
        bp->rx_prod_bseq = 0;

        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;

                /* Every usable BD on this page gets the same length and
                 * single-buffer (START|END) flags.
                 */
                rxbd = &bp->rx_desc_ring[i][0];
                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
                        rxbd->rx_bd_len = bp->rx_buf_use_size;
                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                }
                /* The final BD on the page (rxbd now points at it) chains
                 * to the next page, or back to page 0 on the last page.
                 */
                if (i == (bp->rx_max_ring - 1))
                        j = 0;
                else
                        j = i + 1;
                rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
                rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                                       0xffffffff;
        }

        /* Program the RX context: BD-chain context type plus a value in
         * bits 15:8 (0x02 — meaning not evident from this file; verify
         * against the chip documentation).
         */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

        /* Physical base address of the first RX BD page. */
        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-allocate receive skbs for the whole ring; stop early if
         * allocation fails (the ring simply starts partially filled).
         */
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        /* Tell the hardware how many buffers are available. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4248
4249 static void
4250 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4251 {
4252         u32 num_rings, max;
4253
4254         bp->rx_ring_size = size;
4255         num_rings = 1;
4256         while (size > MAX_RX_DESC_CNT) {
4257                 size -= MAX_RX_DESC_CNT;
4258                 num_rings++;
4259         }
4260         /* round to next power of 2 */
4261         max = MAX_RX_RINGS;
4262         while ((max & num_rings) == 0)
4263                 max >>= 1;
4264
4265         if (num_rings != max)
4266                 max <<= 1;
4267
4268         bp->rx_max_ring = max;
4269         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4270 }
4271
/* Release every skb still held by the TX ring: unmap the linear head,
 * unmap each fragment page, then free the skb.  Used when tearing the
 * ring down, so completion order no longer matters.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->tx_buf_ring == NULL)
                return;

        for (i = 0; i < TX_DESC_CNT; ) {
                struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
                struct sk_buff *skb = tx_buf->skb;
                int j, last;

                /* Empty slot — nothing mapped here. */
                if (skb == NULL) {
                        i++;
                        continue;
                }

                /* Unmap the linear (headlen) portion first. */
                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;

                /* Fragments occupy the BDs immediately following the
                 * head BD; unmap each one.
                 */
                last = skb_shinfo(skb)->nr_frags;
                for (j = 0; j < last; j++) {
                        tx_buf = &bp->tx_buf_ring[i + j + 1];
                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(tx_buf, mapping),
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
                dev_kfree_skb(skb);
                /* Advance past the head BD plus all of its fragment BDs. */
                i += j + 1;
        }

}
4308
/* Release every skb still posted to the RX ring: unmap its DMA buffer
 * and free it.  Slots without an skb are simply skipped.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->rx_buf_ring == NULL)
                return;

        for (i = 0; i < bp->rx_max_ring_idx; i++) {
                struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
                struct sk_buff *skb = rx_buf->skb;

                if (skb == NULL)
                        continue;

                /* Buffers were mapped rx_buf_use_size bytes long when
                 * posted (see bnx2_init_rx_ring).
                 */
                pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                        bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                rx_buf->skb = NULL;

                dev_kfree_skb(skb);
        }
}
4332
/* Free all skbs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
4339
/* Full NIC reset: reset the chip, discard all queued skbs, then
 * re-initialize the chip and both rings.  Note the skbs are freed even
 * if the chip reset failed, so no buffers are leaked on the error path.
 * Returns 0 on success or the failing step's error code.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
        int rc;

        rc = bnx2_reset_chip(bp, reset_code);
        bnx2_free_skbs(bp);
        if (rc)
                return rc;

        if ((rc = bnx2_init_chip(bp)) != 0)
                return rc;

        bnx2_init_tx_ring(bp);
        bnx2_init_rx_ring(bp);
        return 0;
}
4357
/* Reset and bring up the NIC, then (re)initialize the PHY and link
 * state under phy_lock.  Returns 0 on success or the reset error code.
 */
static int
bnx2_init_nic(struct bnx2 *bp)
{
        int rc;

        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
                return rc;

        /* PHY accesses and link state updates are serialized by
         * phy_lock; bh-disabled because the timer also takes it.
         */
        spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp);
        bnx2_set_link(bp);
        spin_unlock_bh(&bp->phy_lock);
        return 0;
}
4372
/* Ethtool selftest: verify the read/write and read-only behavior of a
 * table of device registers.
 *
 * For each entry, writing 0 must clear every rw_mask bit, writing
 * all-ones must set every rw_mask bit, and the ro_mask bits must be
 * unchanged by both writes.  The original register value is restored
 * after each test (and on failure).  Entries flagged BNX2_FL_NOT_5709
 * are skipped on the 5709 chip.  The table is terminated by an entry
 * with offset 0xffff.
 *
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                /* Skip registers not present (or different) on the 5709. */
                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                /* Remember the current value so it can be restored. */
                save_val = readl(bp->regview + offset);

                /* Write 0: no rw bit may remain set, ro bits must hold. */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all ones: every rw bit must stick, ro bits hold. */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the original value before reporting failure. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
4543
4544 static int
4545 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4546 {
4547         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4548                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4549         int i;
4550
4551         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4552                 u32 offset;
4553
4554                 for (offset = 0; offset < size; offset += 4) {
4555
4556                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4557
4558                         if (REG_RD_IND(bp, start + offset) !=
4559                                 test_pattern[i]) {
4560                                 return -ENODEV;
4561                         }
4562                 }
4563         }
4564         return 0;
4565 }
4566
4567 static int
4568 bnx2_test_memory(struct bnx2 *bp)
4569 {
4570         int ret = 0;
4571         int i;
4572         static struct mem_entry {
4573                 u32   offset;
4574                 u32   len;
4575         } mem_tbl_5706[] = {
4576                 { 0x60000,  0x4000 },
4577                 { 0xa0000,  0x3000 },
4578                 { 0xe0000,  0x4000 },
4579                 { 0x120000, 0x4000 },
4580                 { 0x1a0000, 0x4000 },
4581                 { 0x160000, 0x4000 },
4582                 { 0xffffffff, 0    },
4583         },
4584         mem_tbl_5709[] = {
4585                 { 0x60000,  0x4000 },
4586                 { 0xa0000,  0x3000 },
4587                 { 0xe0000,  0x4000 },
4588                 { 0x120000, 0x4000 },
4589                 { 0x1a0000, 0x4000 },
4590                 { 0xffffffff, 0    },
4591         };
4592         struct mem_entry *mem_tbl;
4593
4594         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4595                 mem_tbl = mem_tbl_5709;
4596         else
4597                 mem_tbl = mem_tbl_5706;
4598
4599         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4600                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4601                         mem_tbl[i].len)) != 0) {
4602                         return ret;
4603                 }
4604         }
4605
4606         return ret;
4607 }
4608
4609 #define BNX2_MAC_LOOPBACK       0
4610 #define BNX2_PHY_LOOPBACK       1
4611
/* Run a single-packet loopback test in either MAC or PHY loopback
 * mode: transmit one self-addressed frame, force the host coalescing
 * block to update the status block, and verify the frame came back
 * intact on the RX ring.
 *
 * Caller must have the rings initialized (e.g. via bnx2_reset_nic).
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * skb allocation fails, -ENODEV if the packet is lost or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build a max-size (1514 byte) frame addressed to ourselves,
         * with an incrementing byte pattern in the payload.
         */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a status block update (without raising an interrupt)
         * so rx_start_idx below reflects the current RX consumer.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Post a single TX BD describing the whole frame and ring the
         * TX doorbell.
         */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        /* Give the packet time to loop back, then force another status
         * block update to pick up the TX/RX indices.
         */
        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The TX packet must have been consumed... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ...and exactly num_pkts frames must have arrived on RX. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The l2_fhdr written by the chip precedes the frame data. */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject frames the chip flagged as errored. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Received length (minus 4-byte CRC) must match what was sent. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern byte-for-byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4730
4731 #define BNX2_MAC_LOOPBACK_FAILED        1
4732 #define BNX2_PHY_LOOPBACK_FAILED        2
4733 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4734                                          BNX2_PHY_LOOPBACK_FAILED)
4735
/* Ethtool selftest: run both MAC and PHY loopback tests after a fresh
 * NIC reset.  Returns a bitmask of BNX2_MAC_LOOPBACK_FAILED and/or
 * BNX2_PHY_LOOPBACK_FAILED (0 means both passed); reports both as
 * failed if the interface is not up.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
        int rc = 0;

        if (!netif_running(bp->dev))
                return BNX2_LOOPBACK_FAILED;

        /* Start from a clean ring/chip state and a freshly
         * initialized PHY.
         */
        bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp);
        spin_unlock_bh(&bp->phy_lock);
        if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
                rc |= BNX2_MAC_LOOPBACK_FAILED;
        if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
                rc |= BNX2_PHY_LOOPBACK_FAILED;
        return rc;
}
4754
4755 #define NVRAM_SIZE 0x200
4756 #define CRC32_RESIDUAL 0xdebb20e3
4757
/* Ethtool selftest: validate the NVRAM contents.  Checks the magic
 * value at offset 0, then verifies the CRC32 of each of the two
 * 0x100-byte blocks starting at offset 0x100 — a block whose stored
 * CRC is correct yields the standard CRC32 residual when the whole
 * block (data + CRC) is run through the CRC.
 * Returns 0 on success, a read error code, or -ENODEV on mismatch.
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
        u32 buf[NVRAM_SIZE / 4];
        u8 *data = (u8 *) buf;
        int rc = 0;
        u32 magic, csum;

        if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
                goto test_nvram_done;

        /* NVRAM is stored big-endian; 0x669955aa is the NVM signature. */
        magic = be32_to_cpu(buf[0]);
        if (magic != 0x669955aa) {
                rc = -ENODEV;
                goto test_nvram_done;
        }

        if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
                goto test_nvram_done;

        /* First 0x100-byte block (includes its trailing CRC). */
        csum = ether_crc_le(0x100, data);
        if (csum != CRC32_RESIDUAL) {
                rc = -ENODEV;
                goto test_nvram_done;
        }

        /* Second 0x100-byte block. */
        csum = ether_crc_le(0x100, data + 0x100);
        if (csum != CRC32_RESIDUAL) {
                rc = -ENODEV;
        }

test_nvram_done:
        return rc;
}
4792
/* Ethtool selftest: report whether the link is up.
 * BMSR latches link-down events, so it is read twice — the first read
 * clears a stale latched state, the second gives the current status.
 * Returns 0 if link is up, -ENODEV otherwise.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
        u32 bmsr;

        spin_lock_bh(&bp->phy_lock);
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (bmsr & BMSR_LSTATUS) {
                return 0;
        }
        return -ENODEV;
}
4810
/* Verify that the device can actually deliver an interrupt (used to
 * detect broken MSI on some chipsets).  It forces an immediate
 * coalescing event and then polls for up to ~100 ms for the status
 * index in the interrupt-ack register to change, which only happens
 * when the ISR has acknowledged an interrupt.
 * Returns 0 if an interrupt was observed, -ENODEV otherwise.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
        int i;
        u16 status_idx;

        if (!netif_running(bp->dev))
                return -ENODEV;

        status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

        /* This register is not touched during run-time. */
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
        REG_RD(bp, BNX2_HC_COMMAND);

        /* Poll up to 10 times, ~10 ms apart, for the ISR to run. */
        for (i = 0; i < 10; i++) {
                if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
                        status_idx) {

                        break;
                }

                msleep_interruptible(10);
        }
        if (i < 10)
                return 0;

        return -ENODEV;
}
4840
/* Periodic SerDes link maintenance for the 5706, called from
 * bnx2_timer().  Implements parallel detection: if autoneg is enabled
 * but the link never came up while a signal is present and the partner
 * is not sending config words, force 1000/full.  Conversely, once a
 * parallel-detected link sees config words again, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* An autoneg attempt is still in flight; just count down. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Vendor-specific shadow register reads; 0x1c/0x17
                         * select banks, 0x15 returns status.  Bit meanings
                         * per the inline comments below — presumably from
                         * the SerDes PHY datasheet (not visible here).
                         */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        /* Read twice: the first read returns the latched
                         * value — TODO confirm against the PHY datasheet.
                         */
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner isn't autonegotiating: force
                                 * 1000/full and mark parallel detect.
                                 */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Link is up via parallel detect; if the partner now
                 * sends config words, go back to autonegotiation.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4895
/* Periodic SerDes link maintenance for the 5708, called from
 * bnx2_timer().  When autoneg fails to bring the link up, alternate
 * between forced 2.5G and autoneg modes until a link is established.
 * Not applicable when the PHY is managed remotely or is not
 * 2.5G-capable.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* PHY managed by remote firmware — nothing to do here. */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Autoneg attempt still pending; just count down. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                /* Toggle between autoneg and forced 2.5G each time the
                 * link fails to come up within the current interval.
                 */
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        bnx2_disable_forced_2g5(bp);
                        /* Give autoneg two timer ticks before retrying. */
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4928
/* Periodic driver timer: sends the firmware keep-alive pulse, pulls the
 * firmware RX-drop counter into the stats block, applies a 5708 stats
 * workaround, and runs the per-chip SerDes state machines.  Re-arms
 * itself with bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;
        u32 msg;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts are (logically) disabled while intr_sem is held;
         * skip the work but keep the timer running.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Heartbeat to the bootcode so it knows the driver is alive. */
        msg = (u32) ++bp->fw_drv_pulse_wr_seq;
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

        bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4961
4962 static int
4963 bnx2_request_irq(struct bnx2 *bp)
4964 {
4965         struct net_device *dev = bp->dev;
4966         int rc = 0;
4967
4968         if (bp->flags & USING_MSI_FLAG) {
4969                 irq_handler_t   fn = bnx2_msi;
4970
4971                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4972                         fn = bnx2_msi_1shot;
4973
4974                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4975         } else
4976                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4977                                  IRQF_SHARED, dev->name, dev);
4978         return rc;
4979 }
4980
4981 static void
4982 bnx2_free_irq(struct bnx2 *bp)
4983 {
4984         struct net_device *dev = bp->dev;
4985
4986         if (bp->flags & USING_MSI_FLAG) {
4987                 free_irq(bp->pdev->irq, dev);
4988                 pci_disable_msi(bp->pdev);
4989                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4990         } else
4991                 free_irq(bp->pdev->irq, dev);
4992 }
4993
/* Called with rtnl_lock.
 *
 * net_device open handler: powers the device up, allocates ring
 * memory, sets up the interrupt (preferring MSI, one-shot MSI on the
 * 5709), initializes the NIC, and verifies that MSI actually delivers
 * interrupts — falling back to INTx (with a full re-init) if not.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        /* Prefer MSI when the device supports it and it isn't disabled
         * by module parameter; the 5709 additionally supports one-shot
         * MSI.
         */
        if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bp->flags |= ONE_SHOT_MSI_FLAG;
                }
        }
        rc = bnx2_request_irq(bp);

        if (rc) {
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                bnx2_free_irq(bp);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        /* bnx2_free_irq() also disables MSI and clears the
                         * MSI flags, so the re-request below uses INTx.
                         */
                        bnx2_free_irq(bp);

                        rc = bnx2_init_nic(bp);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        /* Re-check the flag: the fallback path above may have cleared it. */
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
5075
/* Workqueue handler that fully reinitializes the chip after a failure
 * (e.g. a TX watchdog timeout).  Runs in process context.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The interface may have been closed while this work was queued. */
	if (!netif_running(bp->dev))
		return;

	/* Flag the reset in progress so bnx2_close() can busy-wait for it
	 * instead of flushing the workqueue (which could deadlock on
	 * rtnl_lock; see the comment in bnx2_close()).
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Keep intr_sem raised so any interrupt arriving before the netif
	 * is restarted is treated as spurious and ignored;
	 * bnx2_netif_start() presumably drops it and re-enables
	 * interrupts -- confirm against bnx2_netif_start().
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5093
/* net_device watchdog callback: the TX queue has been stuck for
 * TX_TIMEOUT jiffies.  Defer the chip reset to process context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5102
5103 #ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device while the VLAN group pointer changes. */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	/* Reprogram RX filtering to match the new VLAN configuration. */
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5117 #endif
5118
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Maps the skb (head plus page fragments) onto consecutive TX buffer
 * descriptors, fills in checksum/VLAN/TSO flags, and rings the chip's
 * doorbell registers.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring could fill;
	 * reaching this means the stop/wake thresholds misfired.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Hardware VLAN tag insertion: the tag rides in the upper 16 bits
	 * of the vlan_tag_flags BD field.
	 */
	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: encode the TCP header offset beyond
			 * the fixed IPv6 header into the BD flag and mss
			 * fields, in units of 8 bytes.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TSO over IPv4: the IP/TCP headers are rewritten
			 * below, so a cloned header block must be un-shared
			 * first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prime the headers with per-segment length and the
			 * TCP pseudo-header checksum the chip expects.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Tell the hardware the combined IP + TCP option
			 * length in 32-bit words.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* First BD covers the linear part of the skb. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of this packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: publish the new producer index and byte
	 * sequence to the chip.
	 */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Flush posted MMIO writes before proceeding. */
	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when a maximally-fragmented skb no longer fits;
	 * re-check afterwards in case bnx2_tx_int() freed descriptors in
	 * the meantime.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5257
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how to behave while the driver is down:
	 * drop the link if WoL is unsupported, otherwise suspend with or
	 * without Wake-on-LAN armed.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Put the chip into a low-power state until the next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5289
/* Fold a 64-bit hardware counter (split into _hi/_lo 32-bit words) into
 * an unsigned long.  The expansion is fully parenthesized so the macro
 * composes safely inside larger expressions (the previous form ended in
 * an unparenthesized "+" term).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) (ctr##_hi) << 32) +			\
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts an unsigned long cannot hold the full counter;
 * report only the low 32 bits.
 */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5302
/* net_device get_stats callback: translate the firmware-maintained
 * statistics block (DMAed into host memory) into the generic
 * net_device_stats counters.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* No statistics block allocated: return the last snapshot. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* NOTE(review): carrier-sense errors are forced to 0 on the 5706
	 * and 5708 A0; presumably the counter is unusable on those chips
	 * -- confirm against the hardware errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Count both MBUF discards and firmware-level drops as missed. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5378
5379 /* All ethtool functions called with rtnl_lock */
5380
/* ethtool get_settings: report supported modes, current advertising
 * mask, and the negotiated speed/duplex (when the link is up).
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* With a remote PHY the media type can change at runtime, so
	 * both serdes and copper modes are reported as supported.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link state sampled below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* No link: speed and duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
5439
/* ethtool set_settings: validate the requested port/autoneg/speed/
 * duplex combination, then commit it and reprogram the PHY.  All state
 * changes happen under phy_lock; any invalid combination returns
 * -EINVAL with the driver state untouched.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so failures leave bp unmodified. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only remote-PHY capable boards can switch media type. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable PHY and fibre media. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			/* 1000 half duplex is not supported. */
			goto err_out_unlock;
		else {
			/* Anything else: advertise all speeds the chosen
			 * medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			/* Forcing gigabit speeds is fibre-only. */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit the new configuration and apply it. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5522
/* ethtool get_drvinfo: driver name/version, PCI bus id, and the
 * bootcode version rendered as "X.Y.Z" from the packed bp->fw_ver word.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	/* Each version byte becomes a single ASCII digit, so this only
	 * renders correctly for components 0-9.
	 */
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
5537
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: the dump size is fixed for this hardware. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5545
/* ethtool get_regs: dump the first 32KB of register space into the
 * caller's buffer.  Only the readable windows listed in
 * reg_boundaries[] are actually read; the gaps remain zeroed.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of (start, end) byte offsets of readable register
	 * windows, terminated by the 0x8000 (== BNX2_REGDUMP_LEN)
	 * sentinel.
	 */
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the chip is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of this window: skip ahead to the next one. */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
5595
5596 static void
5597 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5598 {
5599         struct bnx2 *bp = netdev_priv(dev);
5600
5601         if (bp->flags & NO_WOL_FLAG) {
5602                 wol->supported = 0;
5603                 wol->wolopts = 0;
5604         }
5605         else {
5606                 wol->supported = WAKE_MAGIC;
5607                 if (bp->wol)
5608                         wol->wolopts = WAKE_MAGIC;
5609                 else
5610                         wol->wolopts = 0;
5611         }
5612         memset(&wol->sopass, 0, sizeof(wol->sopass));
5613 }
5614
5615 static int
5616 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5617 {
5618         struct bnx2 *bp = netdev_priv(dev);
5619
5620         if (wol->wolopts & ~WAKE_MAGIC)
5621                 return -EINVAL;
5622
5623         if (wol->wolopts & WAKE_MAGIC) {
5624                 if (bp->flags & NO_WOL_FLAG)
5625                         return -EINVAL;
5626
5627                 bp->wol = 1;
5628         }
5629         else {
5630                 bp->wol = 0;
5631         }
5632         return 0;
5633 }
5634
/* ethtool nway_reset: restart autonegotiation.  Only valid when speed
 * autonegotiation is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* With a remote PHY, simply redo the remote-PHY setup. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the spinlock around msleep(): it may sleep. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the timer that watches SerDes autoneg progress. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation round. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5677
5678 static int
5679 bnx2_get_eeprom_len(struct net_device *dev)
5680 {
5681         struct bnx2 *bp = netdev_priv(dev);
5682
5683         if (bp->flash_info == NULL)
5684                 return 0;
5685
5686         return (int) bp->flash_size;
5687 }
5688
5689 static int
5690 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5691                 u8 *eebuf)
5692 {
5693         struct bnx2 *bp = netdev_priv(dev);
5694         int rc;
5695
5696         /* parameters already validated in ethtool_get_eeprom */
5697
5698         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5699
5700         return rc;
5701 }
5702
5703 static int
5704 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5705                 u8 *eebuf)
5706 {
5707         struct bnx2 *bp = netdev_priv(dev);
5708         int rc;
5709
5710         /* parameters already validated in ethtool_set_eeprom */
5711
5712         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5713
5714         return rc;
5715 }
5716
5717 static int
5718 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5719 {
5720         struct bnx2 *bp = netdev_priv(dev);
5721
5722         memset(coal, 0, sizeof(struct ethtool_coalesce));
5723
5724         coal->rx_coalesce_usecs = bp->rx_ticks;
5725         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5726         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5727         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5728
5729         coal->tx_coalesce_usecs = bp->tx_ticks;
5730         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5731         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5732         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5733
5734         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5735
5736         return 0;
5737 }
5738
/* ethtool set_coalesce: clamp the requested values to the hardware
 * limits (10-bit tick counters, 8-bit frame counters) and restart the
 * NIC so they take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* NOTE(review): the 5708 is restricted to statistics updates
	 * either off or at one-second intervals -- presumably a hardware
	 * limitation; confirm against the chip documentation.
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	/* Cap at 0xffff00 and clear the low 8 bits, presumably the
	 * granularity the hardware register accepts -- confirm.
	 */
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	/* Restart the chip so the new values get programmed. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5786
5787 static void
5788 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5789 {
5790         struct bnx2 *bp = netdev_priv(dev);
5791
5792         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5793         ering->rx_mini_max_pending = 0;
5794         ering->rx_jumbo_max_pending = 0;
5795
5796         ering->rx_pending = bp->rx_ring_size;
5797         ering->rx_mini_pending = 0;
5798         ering->rx_jumbo_pending = 0;
5799
5800         ering->tx_max_pending = MAX_TX_DESC_CNT;
5801         ering->tx_pending = bp->tx_ring_size;
5802 }
5803
/* ethtool set_ringparam: validate the requested ring sizes, then tear
 * down and rebuild the rings if the interface is running.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* The TX ring must hold at least one maximally-fragmented skb. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with no rings while the interface still appears
		 * up -- consider closing the device instead; confirm
		 * intended recovery behavior.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5837
5838 static void
5839 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5840 {
5841         struct bnx2 *bp = netdev_priv(dev);
5842
5843         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5844         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5845         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5846 }
5847
5848 static int
5849 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5850 {
5851         struct bnx2 *bp = netdev_priv(dev);
5852
5853         bp->req_flow_ctrl = 0;
5854         if (epause->rx_pause)
5855                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5856         if (epause->tx_pause)
5857                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5858
5859         if (epause->autoneg) {
5860                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5861         }
5862         else {
5863                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5864         }
5865
5866         spin_lock_bh(&bp->phy_lock);
5867
5868         bnx2_setup_phy(bp, bp->phy_port);
5869
5870         spin_unlock_bh(&bp->phy_lock);
5871
5872         return 0;
5873 }
5874
5875 static u32
5876 bnx2_get_rx_csum(struct net_device *dev)
5877 {
5878         struct bnx2 *bp = netdev_priv(dev);
5879
5880         return bp->rx_csum;
5881 }
5882
5883 static int
5884 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5885 {
5886         struct bnx2 *bp = netdev_priv(dev);
5887
5888         bp->rx_csum = data;
5889         return 0;
5890 }
5891
5892 static int
5893 bnx2_set_tso(struct net_device *dev, u32 data)
5894 {
5895         struct bnx2 *bp = netdev_priv(dev);
5896
5897         if (data) {
5898                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5899                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5900                         dev->features |= NETIF_F_TSO6;
5901         } else
5902                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5903                                    NETIF_F_TSO_ECN);
5904         return 0;
5905 }
5906
/* Number of statistics exported via ethtool; must match the sizes of
 * bnx2_stats_str_arr and bnx2_stats_offset_arr.
 */
#define BNX2_NUM_STATS 46

/* ethtool statistic names; entry order must stay in sync with
 * bnx2_stats_offset_arr below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5959
/* Byte offset of a statistics_block field, expressed as a 32-bit word
 * index into the hardware statistics block.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset (into the hardware stats block) of each counter, in the
 * same order as bnx2_stats_str_arr.  For 64-bit counters this is the
 * offset of the _hi word; the corresponding width table below says
 * whether one or two words are read.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6010
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Counter width in bytes (8 = 64-bit, 4 = 32-bit, 0 = skip) for each
 * entry of bnx2_stats_str_arr on 5706 (and 5708 A0) chips.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6021
/* Counter width in bytes (8 = 64-bit, 4 = 32-bit, 0 = skip) for each
 * entry of bnx2_stats_str_arr on 5708 (post-A0) and later chips; only
 * stat_IfHCInBadOctets is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6029
#define BNX2_NUM_TESTS 6

/* ethtool self-test names, reported for ETH_SS_TEST.  The order must
 * match the buf[] result slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6042
/* ethtool .self_test_count handler: number of self-test result slots. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
6048
/* ethtool .self_test handler.  Fills buf[0..5] with per-test results
 * (non-zero = failed) in the order of bnx2_tests_str_arr and sets
 * ETH_TEST_FL_FAILED on any failure.  The three offline tests require
 * stopping traffic and resetting the chip into diagnostic mode; the
 * NIC is re-initialized (or left reset if the interface is down)
 * afterwards.  The online tests run without disturbing the device.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diag mode
		 * before running the destructive offline tests.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback test returns a bitmask of failed loopback modes. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* Restore normal operation after diag mode. */
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6104
6105 static void
6106 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6107 {
6108         switch (stringset) {
6109         case ETH_SS_STATS:
6110                 memcpy(buf, bnx2_stats_str_arr,
6111                         sizeof(bnx2_stats_str_arr));
6112                 break;
6113         case ETH_SS_TEST:
6114                 memcpy(buf, bnx2_tests_str_arr,
6115                         sizeof(bnx2_tests_str_arr));
6116                 break;
6117         }
6118 }
6119
/* ethtool .get_stats_count handler: number of statistics reported. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
6125
6126 static void
6127 bnx2_get_ethtool_stats(struct net_device *dev,
6128                 struct ethtool_stats *stats, u64 *buf)
6129 {
6130         struct bnx2 *bp = netdev_priv(dev);
6131         int i;
6132         u32 *hw_stats = (u32 *) bp->stats_blk;
6133         u8 *stats_len_arr = NULL;
6134
6135         if (hw_stats == NULL) {
6136                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6137                 return;
6138         }
6139
6140         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6141             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6142             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6143             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6144                 stats_len_arr = bnx2_5706_stats_len_arr;
6145         else
6146                 stats_len_arr = bnx2_5708_stats_len_arr;
6147
6148         for (i = 0; i < BNX2_NUM_STATS; i++) {
6149                 if (stats_len_arr[i] == 0) {
6150                         /* skip this counter */
6151                         buf[i] = 0;
6152                         continue;
6153                 }
6154                 if (stats_len_arr[i] == 4) {
6155                         /* 4-byte counter */
6156                         buf[i] = (u64)