[BNX2]: Add bnx2_set_default_link().
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define DRV_MODULE_NAME         "bnx2"
56 #define PFX DRV_MODULE_NAME     ": "
57 #define DRV_MODULE_VERSION      "1.5.11"
58 #define DRV_MODULE_RELDATE      "June 4, 2007"
59
60 #define RUN_AT(x) (jiffies + (x))
61
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT  (5*HZ)
64
65 static const char version[] __devinitdata =
66         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
73 static int disable_msi = 0;
74
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board variants supported by this driver.  The enum value is stored in
 * the driver_data field of bnx2_pci_tbl[] below and used as an index
 * into board_info[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
89
/* indexed by board_t, above */
/* Human-readable board names; presumably printed at probe time — one
 * entry per board_t value, so the two tables must stay in sync.
 */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
104
/* PCI IDs this driver binds to.  HP OEM subsystem IDs are listed before
 * the PCI_ANY_ID wildcard entries so they match first; the last field
 * (driver_data) is the board_t index.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
126
/* Known NVRAM/flash parts and their access parameters.  Field layout is
 * defined by struct flash_spec in bnx2.h; presumably the first word is
 * matched against the device's flash strapping to select an entry —
 * TODO confirm against the flash detection code.  "Expansion" entries
 * are placeholders for strap combinations with no known part.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
213
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
/* Return the number of free tx descriptors.
 *
 * tx_prod and tx_cons are free-running 16-bit-style indices, so their
 * difference normally equals the number of in-use descriptors.  The ring
 * uses 256 indices for 255 entries (one index per page is skipped), hence
 * the correction when the raw difference reaches TX_DESC_CNT.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	/* Ensure tx_prod/tx_cons are read after prior ring updates are
	 * visible (pairs with barriers on the producer/consumer side —
	 * TODO confirm against the tx path).
	 */
	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
233
/* Read a device register indirectly through the PCI config window.
 * The shared address/data window pair is serialized with indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
245
/* Write a device register indirectly through the PCI config window.
 * Counterpart of bnx2_reg_rd_ind(); same locking rules.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
254
255 static void
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257 {
258         offset += cid_addr;
259         spin_lock_bh(&bp->indirect_lock);
260         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261                 int i;
262
263                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266                 for (i = 0; i < 5; i++) {
267                         u32 val;
268                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270                                 break;
271                         udelay(5);
272                 }
273         } else {
274                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275                 REG_WR(bp, BNX2_CTX_DATA, val);
276         }
277         spin_unlock_bh(&bp->indirect_lock);
278 }
279
/* Read PHY register 'reg' over MDIO into *val.
 *
 * If the MDIO interface is in hardware auto-polling mode, auto-poll is
 * temporarily disabled around the access and restored afterwards.  The
 * read command is polled for completion up to 50 times at 10 us
 * intervals.
 *
 * Returns 0 on success; -EBUSY (with *val set to 0) if the command
 * never completed.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Turn off auto-polling so we can drive MDIO directly. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the read: PHY address, register number, start/busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read for the data; the DATA mask also keeps
			 * the busy bit clear for the check below.
			 */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
336
/* Write 'val' to PHY register 'reg' over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the access
 * if enabled, and the write command is polled for completion up to 50
 * times at 10 us intervals.
 *
 * Returns 0 on success, -EBUSY if the command never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Turn off auto-polling so we can drive MDIO directly. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the write: PHY address, register, data, start/busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
385
/* Mask device interrupts.  The read-back of the same register
 * presumably flushes the posted write before returning.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
393
/* Unmask device interrupts.
 *
 * Acks up to last_status_idx twice — first with the mask bit still set,
 * then without it — and finally forces a coalescing event so any status
 * block update that arrived while masked generates an interrupt.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
406
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path sees the device as
 * quiesced; bnx2_netif_start() decrements it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
414
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  trans_start is refreshed so the
 * watchdog does not fire a spurious tx timeout while stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
425
426 static void
427 bnx2_netif_start(struct bnx2 *bp)
428 {
429         if (atomic_dec_and_test(&bp->intr_sem)) {
430                 if (netif_running(bp->dev)) {
431                         netif_wake_queue(bp->dev);
432                         netif_poll_enable(bp->dev);
433                         bnx2_enable_int(bp);
434                 }
435         }
436 }
437
/* Release all DMA and host memory allocated by bnx2_alloc_mem().
 *
 * Safe to call on a partially allocated state (used as the error path
 * of bnx2_alloc_mem()): every pointer is checked or NULL-safe, and is
 * reset to NULL after freeing to prevent double frees.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709-only context memory pages. */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and statistics blocks share one allocation; stats_blk
	 * points into it, so only status_blk is freed.
	 */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);	/* kfree(NULL) is a no-op */
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);	/* vfree(NULL) is a no-op */
	bp->rx_buf_ring = NULL;
}
476
/* Allocate all DMA rings and host-side shadow buffers for the device.
 *
 * Allocates the tx buffer/descriptor rings, rx buffer/descriptor rings,
 * a combined status+statistics DMA block, and (5709 only) on-chip
 * context backing pages.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is torn down via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx shadow ring can be large (rx_max_ring pages), so vmalloc. */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives immediately after the cache-aligned status
	 * block within the same DMA allocation.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of context backing, in BCM_PAGE_SIZE chunks
		 * (at least one page even if BCM_PAGE_SIZE > 0x2000).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
547
548 static void
549 bnx2_report_fw_link(struct bnx2 *bp)
550 {
551         u32 fw_link_status = 0;
552
553         if (bp->link_up) {
554                 u32 bmsr;
555
556                 switch (bp->line_speed) {
557                 case SPEED_10:
558                         if (bp->duplex == DUPLEX_HALF)
559                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
560                         else
561                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
562                         break;
563                 case SPEED_100:
564                         if (bp->duplex == DUPLEX_HALF)
565                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
566                         else
567                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
568                         break;
569                 case SPEED_1000:
570                         if (bp->duplex == DUPLEX_HALF)
571                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
572                         else
573                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
574                         break;
575                 case SPEED_2500:
576                         if (bp->duplex == DUPLEX_HALF)
577                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
578                         else
579                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
580                         break;
581                 }
582
583                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
584
585                 if (bp->autoneg) {
586                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
587
588                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
589                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
590
591                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
592                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
593                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
594                         else
595                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
596                 }
597         }
598         else
599                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
600
601         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
602 }
603
/* Log the link state, update the carrier flag, and forward the state to
 * firmware.  The unprefixed printk() calls are continuations that build
 * a single log line — their text and order are user-visible, keep exact.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
638
/* Resolve the negotiated pause (flow control) configuration into
 * bp->flow_ctrl.
 *
 * If speed or flow control is forced (not both autonegotiated), the
 * requested setting is applied directly (full duplex only).  Otherwise
 * the local and partner pause advertisements are combined per the
 * resolution table in IEEE 802.3 Annex 28B.  On 5708 SerDes the
 * resolved result is read straight from the PHY status register.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful at full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports the resolved pause state directly. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map the 1000BASE-X pause bits onto the copper PAUSE_CAP/
	 * PAUSE_ASYM bits so one resolution table works for both.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
714
715 static int
716 bnx2_5709s_linkup(struct bnx2 *bp)
717 {
718         u32 val, speed;
719
720         bp->link_up = 1;
721
722         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
723         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
724         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
725
726         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
727                 bp->line_speed = bp->req_line_speed;
728                 bp->duplex = bp->req_duplex;
729                 return 0;
730         }
731         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
732         switch (speed) {
733                 case MII_BNX2_GP_TOP_AN_SPEED_10:
734                         bp->line_speed = SPEED_10;
735                         break;
736                 case MII_BNX2_GP_TOP_AN_SPEED_100:
737                         bp->line_speed = SPEED_100;
738                         break;
739                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
740                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
741                         bp->line_speed = SPEED_1000;
742                         break;
743                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
744                         bp->line_speed = SPEED_2500;
745                         break;
746         }
747         if (val & MII_BNX2_GP_TOP_AN_FD)
748                 bp->duplex = DUPLEX_FULL;
749         else
750                 bp->duplex = DUPLEX_HALF;
751         return 0;
752 }
753
754 static int
755 bnx2_5708s_linkup(struct bnx2 *bp)
756 {
757         u32 val;
758
759         bp->link_up = 1;
760         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762                 case BCM5708S_1000X_STAT1_SPEED_10:
763                         bp->line_speed = SPEED_10;
764                         break;
765                 case BCM5708S_1000X_STAT1_SPEED_100:
766                         bp->line_speed = SPEED_100;
767                         break;
768                 case BCM5708S_1000X_STAT1_SPEED_1G:
769                         bp->line_speed = SPEED_1000;
770                         break;
771                 case BCM5708S_1000X_STAT1_SPEED_2G5:
772                         bp->line_speed = SPEED_2500;
773                         break;
774         }
775         if (val & BCM5708S_1000X_STAT1_FD)
776                 bp->duplex = DUPLEX_FULL;
777         else
778                 bp->duplex = DUPLEX_HALF;
779
780         return 0;
781 }
782
783 static int
784 bnx2_5706s_linkup(struct bnx2 *bp)
785 {
786         u32 bmcr, local_adv, remote_adv, common;
787
788         bp->link_up = 1;
789         bp->line_speed = SPEED_1000;
790
791         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
792         if (bmcr & BMCR_FULLDPLX) {
793                 bp->duplex = DUPLEX_FULL;
794         }
795         else {
796                 bp->duplex = DUPLEX_HALF;
797         }
798
799         if (!(bmcr & BMCR_ANENABLE)) {
800                 return 0;
801         }
802
803         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
805
806         common = local_adv & remote_adv;
807         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
808
809                 if (common & ADVERTISE_1000XFULL) {
810                         bp->duplex = DUPLEX_FULL;
811                 }
812                 else {
813                         bp->duplex = DUPLEX_HALF;
814                 }
815         }
816
817         return 0;
818 }
819
/* Record link-up speed/duplex for a copper PHY.
 *
 * With autoneg enabled, the highest common capability is selected in
 * order: 1000F, 1000H, 100F, 100H, 10F, 10H; no overlap at all marks
 * the link down.  With autoneg disabled, speed/duplex come from the
 * BMCR forced bits.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Gigabit: our CTRL1000 advertisement vs. partner's
		 * STAT1000.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The >>2 aligns the LPA_1000* status bits with the
		 * ADVERTISE_1000* control bits (per mii.h layout).
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit overlap; fall back to 10/100
			 * resolution from the base-page registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common capability at all. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: take forced speed/duplex from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
885
/* Program the EMAC to match the resolved link state (bp->link_up,
 * bp->line_speed, bp->duplex) and flow control (bp->flow_ctrl),
 * then acknowledge the link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* NOTE(review): 0x2620/0x26ff look like slot-time/IPG tuning,
         * with the larger value used only for 1G half duplex — confirm
         * against the EMAC_TX_LENGTHS register documentation.
         */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* Only chips other than the 5706 have a
                                 * dedicated 10M MII port mode; the 5706
                                 * falls through to plain MII.
                                 */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                /* 2.5G is GMII port mode plus the 25G bit. */
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* No link: park the port in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}
952
953 static void
954 bnx2_enable_bmsr1(struct bnx2 *bp)
955 {
956         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957             (CHIP_NUM(bp) == CHIP_NUM_5709))
958                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959                                MII_BNX2_BLK_ADDR_GP_STATUS);
960 }
961
962 static void
963 bnx2_disable_bmsr1(struct bnx2 *bp)
964 {
965         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966             (CHIP_NUM(bp) == CHIP_NUM_5709))
967                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
969 }
970
971 static int
972 bnx2_test_and_enable_2g5(struct bnx2 *bp)
973 {
974         u32 up1;
975         int ret = 1;
976
977         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
978                 return 0;
979
980         if (bp->autoneg & AUTONEG_SPEED)
981                 bp->advertising |= ADVERTISED_2500baseX_Full;
982
983         if (CHIP_NUM(bp) == CHIP_NUM_5709)
984                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
985
986         bnx2_read_phy(bp, bp->mii_up1, &up1);
987         if (!(up1 & BCM5708S_UP1_2G5)) {
988                 up1 |= BCM5708S_UP1_2G5;
989                 bnx2_write_phy(bp, bp->mii_up1, up1);
990                 ret = 0;
991         }
992
993         if (CHIP_NUM(bp) == CHIP_NUM_5709)
994                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996
997         return ret;
998 }
999
1000 static int
1001 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1002 {
1003         u32 up1;
1004         int ret = 0;
1005
1006         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1007                 return 0;
1008
1009         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
1012         bnx2_read_phy(bp, bp->mii_up1, &up1);
1013         if (up1 & BCM5708S_UP1_2G5) {
1014                 up1 &= ~BCM5708S_UP1_2G5;
1015                 bnx2_write_phy(bp, bp->mii_up1, up1);
1016                 ret = 1;
1017         }
1018
1019         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
1023         return ret;
1024 }
1025
1026 static void
1027 bnx2_enable_forced_2g5(struct bnx2 *bp)
1028 {
1029         u32 bmcr;
1030
1031         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1032                 return;
1033
1034         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1035                 u32 val;
1036
1037                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1039                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1043
1044                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1047
1048         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1049                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1051         }
1052
1053         if (bp->autoneg & AUTONEG_SPEED) {
1054                 bmcr &= ~BMCR_ANENABLE;
1055                 if (bp->req_duplex == DUPLEX_FULL)
1056                         bmcr |= BMCR_FULLDPLX;
1057         }
1058         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1059 }
1060
1061 static void
1062 bnx2_disable_forced_2g5(struct bnx2 *bp)
1063 {
1064         u32 bmcr;
1065
1066         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1067                 return;
1068
1069         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1070                 u32 val;
1071
1072                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1074                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1077
1078                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1081
1082         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1083                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1085         }
1086
1087         if (bp->autoneg & AUTONEG_SPEED)
1088                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1090 }
1091
/* Poll the PHY link status, update bp->link_up/line_speed/duplex/
 * flow_ctrl accordingly, report any link change, and reprogram the
 * MAC.  Called with bp->phy_lock held (implied by the phy accesses
 * here; the setup paths below drop/retake it).  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback modes the link is considered up unconditionally. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        link_up = bp->link_up;

        /* BMSR link status is latched-low; read it twice to get the
         * current state rather than a stale latched value.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        /* On 5706 SerDes, trust the EMAC's view of the link rather
         * than the PHY's BMSR bit.
         */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Resolve speed/duplex with the chip-specific handler. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link lost: drop any forced 2.5G so autoneg can retry
                 * at lower speeds.
                 */
                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                bp->link_up = 0;
        }

        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1154
1155 static int
1156 bnx2_reset_phy(struct bnx2 *bp)
1157 {
1158         int i;
1159         u32 reg;
1160
1161         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1162
1163 #define PHY_RESET_MAX_WAIT 100
1164         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1165                 udelay(10);
1166
1167                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1168                 if (!(reg & BMCR_RESET)) {
1169                         udelay(20);
1170                         break;
1171                 }
1172         }
1173         if (i == PHY_RESET_MAX_WAIT) {
1174                 return -EBUSY;
1175         }
1176         return 0;
1177 }
1178
1179 static u32
1180 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1181 {
1182         u32 adv = 0;
1183
1184         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1186
1187                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188                         adv = ADVERTISE_1000XPAUSE;
1189                 }
1190                 else {
1191                         adv = ADVERTISE_PAUSE_CAP;
1192                 }
1193         }
1194         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196                         adv = ADVERTISE_1000XPSE_ASYM;
1197                 }
1198                 else {
1199                         adv = ADVERTISE_PAUSE_ASYM;
1200                 }
1201         }
1202         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1205                 }
1206                 else {
1207                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208                 }
1209         }
1210         return adv;
1211 }
1212
/* Configure the SerDes PHY according to the requested settings
 * (bp->autoneg, bp->req_line_speed, bp->req_duplex, bp->advertising).
 * Handles both forced-speed and autoneg modes, and deliberately forces
 * the link down when settings change so the partner renegotiates.
 * Called with bp->phy_lock held; drops and retakes it around msleep().
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                /* Forced-speed path. */
                u32 new_bmcr;
                int force_link_down = 0;

                /* Reconcile the 2.5G advertisement with the requested
                 * speed; if it changed, the link must be bounced.
                 */
                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                /* Clear BMCR_SPEED100 (bit 13) so the
                                 * speed-select bits mean 1000 Mbps.
                                 */
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                /* Withdraw all abilities and briefly
                                 * re-enable autoneg so the partner sees
                                 * the link drop before the new forced
                                 * settings are applied.
                                 */
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Autoneg path. */
        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        /* msleep() may sleep; phy_lock must be dropped. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                /* Nothing changed; just re-resolve and reprogram MAC. */
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
1324
/* Advertisement masks for the supported media types.
 *
 * Fix: ETHTOOL_ALL_FIBRE_SPEED expands to a conditional expression and
 * was not parenthesized.  At a use site like
 *         ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg
 * the `|` bound tighter than `?:`, so ADVERTISED_Autoneg was OR'd only
 * into the else arm and 2.5G-capable SerDes devices silently lost the
 * Autoneg advertisement bit.  Outer parentheses fix the precedence.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                      \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full))

#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1339
1340 static void
1341 bnx2_set_default_link(struct bnx2 *bp)
1342 {
1343         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1344         bp->req_line_speed = 0;
1345         if (bp->phy_flags & PHY_SERDES_FLAG) {
1346                 u32 reg;
1347
1348                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1349
1350                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1351                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1352                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1353                         bp->autoneg = 0;
1354                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1355                         bp->req_duplex = DUPLEX_FULL;
1356                 }
1357         } else
1358                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1359 }
1360
/* Configure the copper PHY according to the requested settings
 * (bp->autoneg, bp->advertising, bp->req_line_speed, bp->req_duplex).
 * Called with bp->phy_lock held; drops and retakes it around msleep().
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                /* Autoneg path: rebuild the advertisement registers and
                 * restart autoneg only if something actually changed.
                 */
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Keep only the bits this function manages when
                 * comparing the current advertisement.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced-speed path: 10/100 only (copper gigabit requires
         * autoneg per 802.3).
         */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR link status is latched-low; read twice for the
                 * current state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1457
1458 static int
1459 bnx2_setup_phy(struct bnx2 *bp)
1460 {
1461         if (bp->loopback == MAC_LOOPBACK)
1462                 return 0;
1463
1464         if (bp->phy_flags & PHY_SERDES_FLAG) {
1465                 return (bnx2_setup_serdes_phy(bp));
1466         }
1467         else {
1468                 return (bnx2_setup_copper_phy(bp));
1469         }
1470 }
1471
/* One-time init of the 5709 SerDes PHY.  Sets up the shifted MII
 * register map used by this PHY, then programs fiber mode, optional
 * 2.5G capability, and next-page/CL73 autoneg behavior, selecting the
 * appropriate register block before each group of accesses.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
        u32 val;

        /* The 5709 SerDes IEEE registers sit at a 0x10 offset. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Route register accesses to the autoneg MMD. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        bnx2_reset_phy(bp);

        /* Force fiber mode instead of media auto-detection. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Enable or disable the 2.5G ability to match the device. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable next-page exchange (BAM/Teton2). */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        /* Enable clause-73 BAM autoneg handling. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the default register block selected. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
1520
/* One-time init of the 5708 SerDes PHY: fiber mode with auto-detect,
 * parallel detect, optional 2.5G capability, plus board-specific TX
 * signal tuning taken from shared memory.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
        u32 val;

        bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        /* Fiber mode with media auto-detection enabled. */
        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        /* Enable parallel detection for non-autoneg partners. */
        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Board-specific TX control value stored by firmware in shmem;
         * applied only on backplane designs.
         */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = REG_RD_IND(bp, bp->shmem_base +
                                          BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
1578
/* One-time init of the 5706 SerDes PHY, including jumbo-frame packet
 * length configuration via vendor-specific registers.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
        bnx2_reset_phy(bp);

        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

        /* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 — purpose not
         * evident from this file; confirm against chip documentation.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                /* Registers 0x18/0x1c are vendor auxiliary/shadow
                 * registers; the magic values come from Broadcom —
                 * confirm against vendor docs before changing.
                 */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Standard MTU: clear the extended packet length bits. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
1615
/* One-time init of the copper PHY: optional CRC and early-DAC
 * workarounds, jumbo-frame packet length setup, and the Broadcom
 * "ethernet@wirespeed" downshift feature.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
        u32 val;

        bnx2_reset_phy(bp);

        /* Vendor workaround sequence; register/value pairs are opaque
         * Broadcom shadow-register writes — confirm against vendor docs
         * before changing.
         */
        if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 in DSP expand register 8 to disable early DAC. */
        if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Standard MTU: clear the extended packet length bits. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
1666
1667
/* Probe the PHY ID, select link-ready interrupt mode, and dispatch to the
 * chip-appropriate SerDes or copper init routine, then start link setup.
 *
 * NOTE(review): presumably called with bp->phy_lock held since it issues
 * MDIO reads — confirm against callers.
 *
 * Returns the init routine's status (0 on success).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
        u32 val;
        int rc = 0;

        /* Use the link-ready style of PHY interrupt reporting. */
        bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
        bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

        /* Default MII register map; SerDes init routines may remap these. */
        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* Assemble the 32-bit PHY ID from the two MII ID registers. */
        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp);
        }
        else {
                rc = bnx2_init_copper_phy(bp);
        }

        bnx2_setup_phy(bp);

        return rc;
}
1706
1707 static int
1708 bnx2_set_mac_loopback(struct bnx2 *bp)
1709 {
1710         u32 mac_mode;
1711
1712         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1713         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1714         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1715         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1716         bp->link_up = 1;
1717         return 0;
1718 }
1719
1720 static int bnx2_test_link(struct bnx2 *);
1721
/* Put the PHY into loopback at forced 1000 Mbps full duplex for the
 * loopback self-test, then force the EMAC into GMII mode with loopback
 * and force-link bits cleared.
 *
 * Returns 0 on success or the bnx2_write_phy() error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        /* phy_lock serializes MDIO access with the rest of the driver. */
        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        /* Poll up to ~1 second for the loopback link to be reported up. */
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
1751
/* Post a message to the bootcode through the shared-memory driver mailbox
 * and wait for the firmware's acknowledgement.
 *
 * @msg_data: BNX2_DRV_MSG_* code; the incremented driver sequence number
 *            is OR'ed in before posting so acks can be matched.
 * @silent:   suppress the timeout printk when non-zero.
 *
 * Returns 0 on success (WAIT0 data messages return 0 without requiring
 * an ack), -EBUSY on ack timeout (the timeout is also reported back to
 * the firmware), or -EIO if the ack carries a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

                /* Firmware echoes the sequence number in the ack field. */
                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
1794
/* Enable the 5709 context memory and load the host page table with the
 * DMA addresses of the ctx_pages context blocks allocated elsewhere.
 *
 * Returns 0 on success, or -EBUSY if the chip fails to complete memory
 * init or a page-table write within the polling budget.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Bit 12 is part of the documented init value — presumably a
         * vendor-required mode bit; TODO confirm against chip docs.
         */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Wait for the chip to clear the MEM_INIT self-clearing bit. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Program low/high halves of the page DMA address, then
                 * trigger the write for page table entry i.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* WRITE_REQ self-clears when the entry has been written. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
1837
/* Zero out all 96 on-chip connection contexts (non-5709 chips).
 *
 * On the 5706 A0, certain context IDs are remapped to different
 * physical CIDs to work around bad context memory, so the virtual and
 * physical context addresses may differ.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        vcid_addr = GET_PCID_ADDR(vcid);
                        /* Remap CIDs with bit 3 set to the 0x60+ range. */
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* Each context spans CTX_SIZE / PHY_CTX_SIZE pages. */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                CTX_WR(bp, 0x00, offset, 0);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
                }
        }
}
1883
1884 static int
1885 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1886 {
1887         u16 *good_mbuf;
1888         u32 good_mbuf_cnt;
1889         u32 val;
1890
1891         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1892         if (good_mbuf == NULL) {
1893                 printk(KERN_ERR PFX "Failed to allocate memory in "
1894                                     "bnx2_alloc_bad_rbuf\n");
1895                 return -ENOMEM;
1896         }
1897
1898         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1899                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1900
1901         good_mbuf_cnt = 0;
1902
1903         /* Allocate a bunch of mbufs and save the good ones in an array. */
1904         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1905         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1906                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1907
1908                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1909
1910                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1911
1912                 /* The addresses with Bit 9 set are bad memory blocks. */
1913                 if (!(val & (1 << 9))) {
1914                         good_mbuf[good_mbuf_cnt] = (u16) val;
1915                         good_mbuf_cnt++;
1916                 }
1917
1918                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1919         }
1920
1921         /* Free the good ones back to the mbuf pool thus discarding
1922          * all the bad ones. */
1923         while (good_mbuf_cnt) {
1924                 good_mbuf_cnt--;
1925
1926                 val = good_mbuf[good_mbuf_cnt];
1927                 val = (val << 9) | val | 1;
1928
1929                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1930         }
1931         kfree(good_mbuf);
1932         return 0;
1933 }
1934
1935 static void
1936 bnx2_set_mac_addr(struct bnx2 *bp)
1937 {
1938         u32 val;
1939         u8 *mac_addr = bp->dev->dev_addr;
1940
1941         val = (mac_addr[0] << 8) | mac_addr[1];
1942
1943         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1944
1945         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1946                 (mac_addr[4] << 8) | mac_addr[5];
1947
1948         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1949 }
1950
/* Allocate and DMA-map a fresh rx skb and install it in rx ring slot
 * @index: align skb->data to BNX2_RX_ALIGN, publish the bus address in
 * the rx_bd, and advance rx_prod_bseq by the buffer size.
 *
 * Returns 0 on success or -ENOMEM if no skb could be allocated.
 * NOTE(review): the pci_map_single() result is not checked for a
 * mapping error — typical for this era of the DMA API, but worth
 * confirming on platforms where mappings can fail.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        /* Align the data pointer so the chip's DMA constraint is met. */
        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        /* Split the 64-bit bus address across the two BD halves. */
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        bp->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}
1981
1982 static int
1983 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1984 {
1985         struct status_block *sblk = bp->status_blk;
1986         u32 new_link_state, old_link_state;
1987         int is_set = 1;
1988
1989         new_link_state = sblk->status_attn_bits & event;
1990         old_link_state = sblk->status_attn_bits_ack & event;
1991         if (new_link_state != old_link_state) {
1992                 if (new_link_state)
1993                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1994                 else
1995                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1996         } else
1997                 is_set = 0;
1998
1999         return is_set;
2000 }
2001
/* Service a PHY attention: if the link-state attention bit changed,
 * re-evaluate the link under phy_lock.  Called from NAPI poll context.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
        if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
                spin_lock(&bp->phy_lock);
                bnx2_set_link(bp);
                spin_unlock(&bp->phy_lock);
        }
}
2011
/* Reclaim completed tx descriptors: walk from the software consumer to
 * the hardware consumer index, unmap each packet's head and fragments,
 * free the skbs, and wake the tx queue if enough BDs were freed.
 * Runs from NAPI poll context.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_free_bd = 0;

        hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
        /* The hardware index skips the last (next-page) BD in each page. */
        if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &bp->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        /* Don't reclaim until every BD of the TSO packet
                         * (head + all frags) has completed.
                         */
                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* Unmap each paged fragment BD following the head BD. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                tx_free_bd += last + 1;

                dev_kfree_skb(skb);

                /* Re-read the hardware index; more may have completed. */
                hw_cons = bp->hw_tx_cons =
                        sblk->status_tx_quick_consumer_index0;

                if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                        hw_cons++;
                }
        }

        bp->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
                     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
                /* Re-check under the tx lock to avoid racing the xmit path. */
                netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
}
2099
/* Recycle an rx skb: move the skb and its DMA mapping from ring slot
 * @cons to ring slot @prod (copying the BD address), re-sync the header
 * area for the device, and advance rx_prod_bseq.  Used when a packet is
 * dropped or was copied out of the buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
        u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &bp->rx_buf_ring[cons];
        prod_rx_buf = &bp->rx_buf_ring[prod];

        /* Hand the header area (synced for the CPU in bnx2_rx_int())
         * back to the device.
         */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        bp->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: mapping and BD address are already in place. */
        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2129
/* NAPI rx handler: process up to @budget received packets between the
 * software and hardware consumer indices.  Each packet's frame header
 * (struct l2_fhdr) precedes the data in the rx buffer.  Bad frames are
 * recycled; small frames (with jumbo MTU) are copied into a fresh skb;
 * otherwise the buffer is handed up and a new one is allocated in its
 * place.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0;

        hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
        /* The hardware index skips the last (next-page) BD in each page. */
        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->rx_cons;
        sw_prod = bp->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Sync only the frame header area; the full buffer is
                 * unmapped later if the skb is passed up.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

                rx_hdr = (struct l2_fhdr *) skb->data;
                /* Strip the 4-byte FCS from the reported length. */
                len = rx_hdr->l2_fhdr_pkt_len - 4;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        goto reuse_rx;
                }

                /* Since we don't have a jumbo ring, copy small packets
                 * if mtu > 1500
                 */
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)
                                goto reuse_rx;

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        /* Recycle the original buffer back to the ring. */
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                }
                else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
                        pci_unmap_single(bp->pdev, dma_addr,
                                bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                        skb_reserve(skb, bp->rx_offset);
                        skb_put(skb, len);
                }
                else {
reuse_rx:
                        /* Drop the frame and return the buffer to the ring. */
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);
                        goto next_rx;
                }

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless they are VLAN-tagged
                 * (0x8100), which legitimately adds 4 bytes.
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bp->hw_rx_cons =
                                sblk->status_rx_quick_consumer_index0;
                        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
                                hw_cons++;
                        rmb();
                }
        }
        bp->rx_cons = sw_cons;
        bp->rx_prod = sw_prod;

        /* Tell the chip about the new producer index and byte sequence. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
2277
2278 /* MSI ISR - The only difference between this and the INTx ISR
2279  * is that the MSI interrupt is always serviced.
2280  */
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);

        prefetch(bp->status_blk);
        /* Mask further interrupts until NAPI poll completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev);

        return IRQ_HANDLED;
}
2300
/* One-shot MSI ISR: the hardware auto-masks the interrupt, so no
 * explicit mask write is needed — just schedule NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);

        prefetch(bp->status_blk);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev);

        return IRQ_HANDLED;
}
2317
/* INTx ISR: detect whether the interrupt is really ours (the line may
 * be shared), mask further interrupts, and schedule NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((bp->status_blk->status_idx == bp->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev);

        return IRQ_HANDLED;
}
2347
2348 #define STATUS_ATTN_EVENTS      STATUS_ATTN_BITS_LINK_STATE
2349
2350 static inline int
2351 bnx2_has_work(struct bnx2 *bp)
2352 {
2353         struct status_block *sblk = bp->status_blk;
2354
2355         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2356             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2357                 return 1;
2358
2359         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2360             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2361                 return 1;
2362
2363         return 0;
2364 }
2365
/* NAPI poll handler: service attention events, tx completions, and up
 * to quota rx packets, then re-enable interrupts if all work is done.
 *
 * Returns 1 to stay on the poll list, 0 when work is complete and
 * interrupts have been re-enabled.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct status_block *sblk = bp->status_blk;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
                bnx2_tx_int(bp);

        if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > dev->quota)
                        orig_budget = dev->quota;

                work_done = bnx2_rx_int(bp, orig_budget);
                *budget -= work_done;
                dev->quota -= work_done;
        }

        bp->last_status_idx = bp->status_blk->status_idx;
        rmb();

        if (!bnx2_has_work(bp)) {
                netif_rx_complete(dev);
                if (likely(bp->flags & USING_MSI_FLAG)) {
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bp->last_status_idx);
                        return 0;
                }
                /* For INTx the ack is written twice deliberately: first
                 * with MASK_INT still set, then with interrupts unmasked.
                 * Presumably a chip requirement for INTx de-assertion —
                 * TODO confirm before simplifying.
                 */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bp->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bp->last_status_idx);
                return 0;
        }

        return 1;
}
2426
2427 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2428  * from set_multicast.
2429  */
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC rx mode (promiscuous, all-multi, or multicast hash
 * filtering) and the RPM sort-user register from dev->flags and the
 * multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Strip VLAN tags in hardware unless vlan accel is off or ASF
         * (management firmware) needs to see tagged frames.
         */
        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each address into one of 256 bits (8 registers
                 * x 32 bits) using the low byte of the CRC.
                 */
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, program, then re-enable the sort rule — keep this
         * write ordering.
         */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
2501
2502 #define FW_BUF_SIZE     0x8000
2503
2504 static int
2505 bnx2_gunzip_init(struct bnx2 *bp)
2506 {
2507         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2508                 goto gunzip_nomem1;
2509
2510         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2511                 goto gunzip_nomem2;
2512
2513         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2514         if (bp->strm->workspace == NULL)
2515                 goto gunzip_nomem3;
2516
2517         return 0;
2518
2519 gunzip_nomem3:
2520         kfree(bp->strm);
2521         bp->strm = NULL;
2522
2523 gunzip_nomem2:
2524         vfree(bp->gunzip_buf);
2525         bp->gunzip_buf = NULL;
2526
2527 gunzip_nomem1:
2528         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2529                             "uncompression.\n", bp->dev->name);
2530         return -ENOMEM;
2531 }
2532
2533 static void
2534 bnx2_gunzip_end(struct bnx2 *bp)
2535 {
2536         kfree(bp->strm->workspace);
2537
2538         kfree(bp->strm);
2539         bp->strm = NULL;
2540
2541         if (bp->gunzip_buf) {
2542                 vfree(bp->gunzip_buf);
2543                 bp->gunzip_buf = NULL;
2544         }
2545 }
2546
2547 static int
2548 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2549 {
2550         int n, rc;
2551
2552         /* check gzip header */
2553         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2554                 return -EINVAL;
2555
2556         n = 10;
2557
2558 #define FNAME   0x8
2559         if (zbuf[3] & FNAME)
2560                 while ((zbuf[n++] != 0) && (n < len));
2561
2562         bp->strm->next_in = zbuf + n;
2563         bp->strm->avail_in = len - n;
2564         bp->strm->next_out = bp->gunzip_buf;
2565         bp->strm->avail_out = FW_BUF_SIZE;
2566
2567         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2568         if (rc != Z_OK)
2569                 return rc;
2570
2571         rc = zlib_inflate(bp->strm, Z_FINISH);
2572
2573         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2574         *outbuf = bp->gunzip_buf;
2575
2576         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2577                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2578                        bp->dev->name, bp->strm->msg);
2579
2580         zlib_inflateEnd(bp->strm);
2581
2582         if (rc == Z_STREAM_END)
2583                 return 0;
2584
2585         return rc;
2586 }
2587
2588 static void
2589 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2590         u32 rv2p_proc)
2591 {
2592         int i;
2593         u32 val;
2594
2595
2596         for (i = 0; i < rv2p_code_len; i += 8) {
2597                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2598                 rv2p_code++;
2599                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2600                 rv2p_code++;
2601
2602                 if (rv2p_proc == RV2P_PROC1) {
2603                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2604                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2605                 }
2606                 else {
2607                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2608                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2609                 }
2610         }
2611
2612         /* Reset the processor, un-stall is done later. */
2613         if (rv2p_proc == RV2P_PROC1) {
2614                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2615         }
2616         else {
2617                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2618         }
2619 }
2620
2621 static int
2622 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2623 {
2624         u32 offset;
2625         u32 val;
2626         int rc;
2627
2628         /* Halt the CPU. */
2629         val = REG_RD_IND(bp, cpu_reg->mode);
2630         val |= cpu_reg->mode_value_halt;
2631         REG_WR_IND(bp, cpu_reg->mode, val);
2632         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2633
2634         /* Load the Text area. */
2635         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2636         if (fw->gz_text) {
2637                 u32 text_len;
2638                 void *text;
2639
2640                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2641                                  &text_len);
2642                 if (rc)
2643                         return rc;
2644
2645                 fw->text = text;
2646         }
2647         if (fw->gz_text) {
2648                 int j;
2649
2650                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2651                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2652                 }
2653         }
2654
2655         /* Load the Data area. */
2656         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2657         if (fw->data) {
2658                 int j;
2659
2660                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2661                         REG_WR_IND(bp, offset, fw->data[j]);
2662                 }
2663         }
2664
2665         /* Load the SBSS area. */
2666         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2667         if (fw->sbss) {
2668                 int j;
2669
2670                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2671                         REG_WR_IND(bp, offset, fw->sbss[j]);
2672                 }
2673         }
2674
2675         /* Load the BSS area. */
2676         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2677         if (fw->bss) {
2678                 int j;
2679
2680                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2681                         REG_WR_IND(bp, offset, fw->bss[j]);
2682                 }
2683         }
2684
2685         /* Load the Read-Only area. */
2686         offset = cpu_reg->spad_base +
2687                 (fw->rodata_addr - cpu_reg->mips_view_base);
2688         if (fw->rodata) {
2689                 int j;
2690
2691                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2692                         REG_WR_IND(bp, offset, fw->rodata[j]);
2693                 }
2694         }
2695
2696         /* Clear the pre-fetch instruction. */
2697         REG_WR_IND(bp, cpu_reg->inst, 0);
2698         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2699
2700         /* Start the CPU. */
2701         val = REG_RD_IND(bp, cpu_reg->mode);
2702         val &= ~cpu_reg->mode_value_halt;
2703         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2704         REG_WR_IND(bp, cpu_reg->mode, val);
2705
2706         return 0;
2707 }
2708
/* Load firmware into all of the chip's internal processors: the two
 * RV2P engines, then the RX, TX, TX patch-up (TPAT) and completion
 * (COM) CPUs, and -- on 5709 parts only -- the command (CP) CPU.
 * A gunzip context is set up first because every image is stored
 * compressed, and torn down on all exit paths.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	/* NOTE(review): 0x8000000 appears to be the base of the CPU's view
	 * of its scratchpad, used to relocate firmware link addresses --
	 * confirm against chip documentation. */
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only; earlier chips have
	 * no CP firmware to load). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2853
/* Move the device between PCI power states.
 *
 * PCI_D0: clear the PM state field (waiting out the mandatory delay if
 * we were in D3hot) and undo the WOL-related MAC/RPM settings.
 *
 * PCI_D3hot: if WOL is enabled, drop the link to 10/100 autoneg,
 * configure the MAC to receive magic/ACPI packets and all multicast,
 * notify the firmware, then write the D3hot state (and PME enable)
 * into PMCSR.  Only D0 and D3hot are supported; anything else returns
 * -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the
			 * low-power link, then restore the user's
			 * settings.  NOTE(review): the copper setup path
			 * is used unconditionally here -- presumably WOL
			 * only applies to copper ports; confirm. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			/* Program the sort register: clear, set, then
			 * set-with-enable. */
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are about to suspend. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 3 is the D3hot encoding of the PMCSR state field.
		 * NOTE(review): 5706 A0/A1 only enter D3hot when WOL is
		 * on -- presumably an early-silicon limitation; confirm. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2980
2981 static int
2982 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2983 {
2984         u32 val;
2985         int j;
2986
2987         /* Request access to the flash interface. */
2988         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2989         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2990                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2991                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2992                         break;
2993
2994                 udelay(5);
2995         }
2996
2997         if (j >= NVRAM_TIMEOUT_COUNT)
2998                 return -EBUSY;
2999
3000         return 0;
3001 }
3002
3003 static int
3004 bnx2_release_nvram_lock(struct bnx2 *bp)
3005 {
3006         int j;
3007         u32 val;
3008
3009         /* Relinquish nvram interface. */
3010         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3011
3012         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3013                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3014                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3015                         break;
3016
3017                 udelay(5);
3018         }
3019
3020         if (j >= NVRAM_TIMEOUT_COUNT)
3021                 return -EBUSY;
3022
3023         return 0;
3024 }
3025
3026
3027 static int
3028 bnx2_enable_nvram_write(struct bnx2 *bp)
3029 {
3030         u32 val;
3031
3032         val = REG_RD(bp, BNX2_MISC_CFG);
3033         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3034
3035         if (!bp->flash_info->buffered) {
3036                 int j;
3037
3038                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3039                 REG_WR(bp, BNX2_NVM_COMMAND,
3040                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3041
3042                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3043                         udelay(5);
3044
3045                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3046                         if (val & BNX2_NVM_COMMAND_DONE)
3047                                 break;
3048                 }
3049
3050                 if (j >= NVRAM_TIMEOUT_COUNT)
3051                         return -EBUSY;
3052         }
3053         return 0;
3054 }
3055
3056 static void
3057 bnx2_disable_nvram_write(struct bnx2 *bp)
3058 {
3059         u32 val;
3060
3061         val = REG_RD(bp, BNX2_MISC_CFG);
3062         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3063 }
3064
3065
3066 static void
3067 bnx2_enable_nvram_access(struct bnx2 *bp)
3068 {
3069         u32 val;
3070
3071         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3072         /* Enable both bits, even on read. */
3073         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3074                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3075 }
3076
3077 static void
3078 bnx2_disable_nvram_access(struct bnx2 *bp)
3079 {
3080         u32 val;
3081
3082         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3083         /* Disable both bits, even after read. */
3084         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3085                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3086                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3087 }
3088
3089 static int
3090 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3091 {
3092         u32 cmd;
3093         int j;
3094
3095         if (bp->flash_info->buffered)
3096                 /* Buffered flash, no erase needed */
3097                 return 0;
3098
3099         /* Build an erase command */
3100         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3101               BNX2_NVM_COMMAND_DOIT;
3102
3103         /* Need to clear DONE bit separately. */
3104         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3105
3106         /* Address of the NVRAM to read from. */
3107         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3108
3109         /* Issue an erase command. */
3110         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3111
3112         /* Wait for completion. */
3113         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3114                 u32 val;
3115
3116                 udelay(5);
3117
3118                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3119                 if (val & BNX2_NVM_COMMAND_DONE)
3120                         break;
3121         }
3122
3123         if (j >= NVRAM_TIMEOUT_COUNT)
3124                 return -EBUSY;
3125
3126         return 0;
3127 }
3128
3129 static int
3130 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3131 {
3132         u32 cmd;
3133         int j;
3134
3135         /* Build the command word. */
3136         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3137
3138         /* Calculate an offset of a buffered flash. */
3139         if (bp->flash_info->buffered) {
3140                 offset = ((offset / bp->flash_info->page_size) <<
3141                            bp->flash_info->page_bits) +
3142                           (offset % bp->flash_info->page_size);
3143         }
3144
3145         /* Need to clear DONE bit separately. */
3146         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3147
3148         /* Address of the NVRAM to read from. */
3149         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3150
3151         /* Issue a read command. */
3152         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3153
3154         /* Wait for completion. */
3155         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3156                 u32 val;
3157
3158                 udelay(5);
3159
3160                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3161                 if (val & BNX2_NVM_COMMAND_DONE) {
3162                         val = REG_RD(bp, BNX2_NVM_READ);
3163
3164                         val = be32_to_cpu(val);
3165                         memcpy(ret_val, &val, 4);
3166                         break;
3167                 }
3168         }
3169         if (j >= NVRAM_TIMEOUT_COUNT)
3170                 return -EBUSY;
3171
3172         return 0;
3173 }
3174
3175
3176 static int
3177 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3178 {
3179         u32 cmd, val32;
3180         int j;
3181
3182         /* Build the command word. */
3183         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3184
3185         /* Calculate an offset of a buffered flash. */
3186         if (bp->flash_info->buffered) {
3187                 offset = ((offset / bp->flash_info->page_size) <<
3188                           bp->flash_info->page_bits) +
3189                          (offset % bp->flash_info->page_size);
3190         }
3191
3192         /* Need to clear DONE bit separately. */
3193         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3194
3195         memcpy(&val32, val, 4);
3196         val32 = cpu_to_be32(val32);
3197
3198         /* Write the data. */
3199         REG_WR(bp, BNX2_NVM_WRITE, val32);
3200
3201         /* Address of the NVRAM to write to. */
3202         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3203
3204         /* Issue the write command. */
3205         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3206
3207         /* Wait for completion. */
3208         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3209                 udelay(5);
3210
3211                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3212                         break;
3213         }
3214         if (j >= NVRAM_TIMEOUT_COUNT)
3215                 return -EBUSY;
3216
3217         return 0;
3218 }
3219
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reprogramming the NVM interface registers if the
 * part has not been configured yet.  Also determines bp->flash_size
 * from shared firmware config, falling back to the table entry.
 * Returns 0 on success, -EBUSY if the NVRAM lock cannot be taken, or
 * -ENODEV for an unrecognized part.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* NOTE(review): bit 30 of NVM_CFG1 is treated as "interface
	 * already reconfigured by firmware/driver" -- confirm against
	 * the register definition. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a match: unknown part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the size advertised in shared firmware config; fall
	 * back to the table's total size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3297
3298 static int
3299 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3300                 int buf_size)
3301 {
3302         int rc = 0;
3303         u32 cmd_flags, offset32, len32, extra;
3304
3305         if (buf_size == 0)
3306                 return 0;
3307
3308         /* Request access to the flash interface. */
3309         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3310                 return rc;
3311
3312         /* Enable access to flash interface */
3313         bnx2_enable_nvram_access(bp);
3314
3315         len32 = buf_size;
3316         offset32 = offset;
3317         extra = 0;
3318
3319         cmd_flags = 0;
3320
3321         if (offset32 & 3) {
3322                 u8 buf[4];
3323                 u32 pre_len;
3324
3325                 offset32 &= ~3;
3326                 pre_len = 4 - (offset & 3);
3327
3328                 if (pre_len >= len32) {
3329                         pre_len = len32;
3330                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3331                                     BNX2_NVM_COMMAND_LAST;
3332                 }
3333                 else {
3334                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3335                 }
3336
3337                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3338
3339                 if (rc)
3340                         return rc;
3341
3342                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3343
3344                 offset32 += 4;
3345                 ret_buf += pre_len;
3346                 len32 -= pre_len;
3347         }
3348         if (len32 & 3) {
3349                 extra = 4 - (len32 & 3);
3350                 len32 = (len32 + 4) & ~3;
3351         }
3352
3353         if (len32 == 4) {
3354                 u8 buf[4];
3355
3356                 if (cmd_flags)
3357                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3358                 else
3359                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3360                                     BNX2_NVM_COMMAND_LAST;
3361
3362                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3363
3364                 memcpy(ret_buf, buf, 4 - extra);
3365         }
3366         else if (len32 > 0) {
3367                 u8 buf[4];
3368
3369                 /* Read the first word. */
3370                 if (cmd_flags)
3371                         cmd_flags = 0;
3372                 else
3373                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3374
3375                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3376
3377                 /* Advance to the next dword. */
3378                 offset32 += 4;
3379                 ret_buf += 4;
3380                 len32 -= 4;
3381
3382                 while (len32 > 4 && rc == 0) {
3383                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3384
3385                         /* Advance to the next dword. */
3386                         offset32 += 4;
3387                         ret_buf += 4;
3388                         len32 -= 4;
3389                 }
3390
3391                 if (rc)
3392                         return rc;
3393
3394                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3395                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3396
3397                 memcpy(ret_buf, buf, 4 - extra);
3398         }
3399
3400         /* Disable access to flash interface */
3401         bnx2_disable_nvram_access(bp);
3402
3403         bnx2_release_nvram_lock(bp);
3404
3405         return rc;
3406 }
3407
/* Write @buf_size bytes from @data_buf to NVRAM at byte offset @offset.
 *
 * Unaligned start/end offsets are handled by reading back the surrounding
 * dwords and merging them into an aligned scratch buffer.  For non-buffered
 * flash parts, each affected page is read back in full, erased, and
 * rewritten (read-modify-write); buffered parts can be written directly.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or an error from the
 * underlying NVRAM access helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* If the start is not dword aligned, read the leading dword so the
	 * bytes that precede @offset can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Likewise read the trailing dword if the (adjusted) length is not
	 * a multiple of 4, to preserve the bytes past the end. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned scratch copy: saved leading bytes, then the
	 * caller's data, then saved trailing bytes. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash must be rewritten a whole page at a time;
	 * this scratch buffer holds one page read back from the device.
	 * NOTE(review): 264 presumably covers the largest page_size in the
	 * supported flash table — confirm against the flash_spec entries. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration until all bytes are out. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* Mark the last dword of the page (or, for buffered
			 * flash, the last dword of the data) so the command
			 * sequence is terminated correctly. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3587
3588 static int
3589 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3590 {
3591         u32 val;
3592         int i, rc = 0;
3593
3594         /* Wait for the current PCI transaction to complete before
3595          * issuing a reset. */
3596         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3597                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3598                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3599                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3600                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3601         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3602         udelay(5);
3603
3604         /* Wait for the firmware to tell us it is ok to issue a reset. */
3605         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3606
3607         /* Deposit a driver reset signature so the firmware knows that
3608          * this is a soft reset. */
3609         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3610                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3611
3612         /* Do a dummy read to force the chip to complete all current transaction
3613          * before we issue a reset. */
3614         val = REG_RD(bp, BNX2_MISC_ID);
3615
3616         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3617                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3618                 REG_RD(bp, BNX2_MISC_COMMAND);
3619                 udelay(5);
3620
3621                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3622                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3623
3624                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3625
3626         } else {
3627                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3628                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3629                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3630
3631                 /* Chip reset. */
3632                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3633
3634                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3635                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3636                         current->state = TASK_UNINTERRUPTIBLE;
3637                         schedule_timeout(HZ / 50);
3638                 }
3639
3640                 /* Reset takes approximate 30 usec */
3641                 for (i = 0; i < 10; i++) {
3642                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3643                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3644                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3645                                 break;
3646                         udelay(10);
3647                 }
3648
3649                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3650                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3651                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3652                         return -EBUSY;
3653                 }
3654         }
3655
3656         /* Make sure byte swapping is properly configured. */
3657         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3658         if (val != 0x01020304) {
3659                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3660                 return -ENODEV;
3661         }
3662
3663         /* Wait for the firmware to finish its initialization. */
3664         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3665         if (rc)
3666                 return rc;
3667
3668         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3669                 /* Adjust the voltage regular to two steps lower.  The default
3670                  * of this register is 0x0000000e. */
3671                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3672
3673                 /* Remove bad rbuf memory from the free pool. */
3674                 rc = bnx2_alloc_bad_rbuf(bp);
3675         }
3676
3677         return rc;
3678 }
3679
/* Program the chip after a reset: DMA configuration, context memory,
 * on-chip CPUs/firmware, MAC address, MTU, host-coalescing parameters,
 * and the receive filter; finally hand off to the firmware and enable
 * all blocks.  The register writes are order-dependent.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): magic DMA tuning bits — confirm against the
	 * BNX2_DMA_CONFIG register definition in bnx2.h. */
	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA tuning bit for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, disable relaxed ordering in the command register. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context/CPU initialization. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip CPUs. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox queue: 256-byte kernel-bypass block size; disable MQ
	 * halt on early 5709 steppings. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* RV2P page size, encoded as log2(page) - 8 in bits 31:24. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the host coalescing block where the status and statistics
	 * blocks live (64-bit DMA addresses split into two registers). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds: each register packs the
	 * during-interrupt value in the high half and the normal value in
	 * the low half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer modes. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Remember whether ASF management firmware is active. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final handshake with the firmware before enabling all blocks. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host coalescing command register for the fast path. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3861
3862 static void
3863 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3864 {
3865         u32 val, offset0, offset1, offset2, offset3;
3866
3867         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3868                 offset0 = BNX2_L2CTX_TYPE_XI;
3869                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3870                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3871                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3872         } else {
3873                 offset0 = BNX2_L2CTX_TYPE;
3874                 offset1 = BNX2_L2CTX_CMD_TYPE;
3875                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3876                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3877         }
3878         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3879         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3880
3881         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3882         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3883
3884         val = (u64) bp->tx_desc_mapping >> 32;
3885         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3886
3887         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3888         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3889 }
3890
3891 static void
3892 bnx2_init_tx_ring(struct bnx2 *bp)
3893 {
3894         struct tx_bd *txbd;
3895         u32 cid;
3896
3897         bp->tx_wake_thresh = bp->tx_ring_size / 2;
3898
3899         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3900
3901         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3902         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3903
3904         bp->tx_prod = 0;
3905         bp->tx_cons = 0;
3906         bp->hw_tx_cons = 0;
3907         bp->tx_prod_bseq = 0;
3908
3909         cid = TX_CID;
3910         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3911         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3912
3913         bnx2_init_tx_context(bp, cid);
3914 }
3915
/* Initialize the RX BD ring(s): size the receive buffers for the current
 * MTU, chain the ring pages into a circular list, program the RX context,
 * and pre-fill the ring with freshly allocated skbs. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Fill every BD in each page, then use the page's final BD as a
	 * chain pointer to the next page; the last page points back to
	 * page 0, making the chain circular. */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context: BD-chain type/size and the address of
	 * the first ring page. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 is an undocumented field here — confirm
	 * against the L2 context layout in bnx2.h. */
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate receive skbs; stop early on allocation failure so
	 * the ring is simply shorter than requested. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the hardware. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3975
3976 static void
3977 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3978 {
3979         u32 num_rings, max;
3980
3981         bp->rx_ring_size = size;
3982         num_rings = 1;
3983         while (size > MAX_RX_DESC_CNT) {
3984                 size -= MAX_RX_DESC_CNT;
3985                 num_rings++;
3986         }
3987         /* round to next power of 2 */
3988         max = MAX_RX_RINGS;
3989         while ((max & num_rings) == 0)
3990                 max >>= 1;
3991
3992         if (num_rings != max)
3993                 max <<= 1;
3994
3995         bp->rx_max_ring = max;
3996         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3997 }
3998
/* Unmap and free every skb still pending in the TX ring.  A multi-
 * fragment skb occupies (nr_frags + 1) consecutive ring slots; only the
 * first slot holds the skb pointer, the following ones hold the page-
 * fragment DMA mappings. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from the following slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past all slots used by this skb (head + frags). */
		i += j + 1;
	}

}
4035
4036 static void
4037 bnx2_free_rx_skbs(struct bnx2 *bp)
4038 {
4039         int i;
4040
4041         if (bp->rx_buf_ring == NULL)
4042                 return;
4043
4044         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4045                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4046                 struct sk_buff *skb = rx_buf->skb;
4047
4048                 if (skb == NULL)
4049                         continue;
4050
4051                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4052                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4053
4054                 rx_buf->skb = NULL;
4055
4056                 dev_kfree_skb(skb);
4057         }
4058 }
4059
/* Free all TX and RX skbs still owned by the driver; used when the
 * rings are torn down or reinitialized. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4066
4067 static int
4068 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4069 {
4070         int rc;
4071
4072         rc = bnx2_reset_chip(bp, reset_code);
4073         bnx2_free_skbs(bp);
4074         if (rc)
4075                 return rc;
4076
4077         if ((rc = bnx2_init_chip(bp)) != 0)
4078                 return rc;
4079
4080         bnx2_init_tx_ring(bp);
4081         bnx2_init_rx_ring(bp);
4082         return 0;
4083 }
4084
4085 static int
4086 bnx2_init_nic(struct bnx2 *bp)
4087 {
4088         int rc;
4089
4090         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4091                 return rc;
4092
4093         spin_lock_bh(&bp->phy_lock);
4094         bnx2_init_phy(bp);
4095         spin_unlock_bh(&bp->phy_lock);
4096         bnx2_set_link(bp);
4097         return 0;
4098 }
4099
4100 static int
4101 bnx2_test_registers(struct bnx2 *bp)
4102 {
4103         int ret;
4104         int i, is_5709;
4105         static const struct {
4106                 u16   offset;
4107                 u16   flags;
4108 #define BNX2_FL_NOT_5709        1
4109                 u32   rw_mask;
4110                 u32   ro_mask;
4111         } reg_tbl[] = {
4112                 { 0x006c, 0, 0x00000000, 0x0000003f },
4113                 { 0x0090, 0, 0xffffffff, 0x00000000 },
4114                 { 0x0094, 0, 0x00000000, 0x00000000 },
4115
4116                 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4117                 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4118                 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4119                 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4120                 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4121                 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4122                 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4123                 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4124                 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4125
4126                 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4127                 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4128                 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4129                 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4130                 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4131                 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4132
4133                 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4134                 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4135                 { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
4136
4137                 { 0x1000, 0, 0x00000000, 0x00000001 },
4138                 { 0x1004, 0, 0x00000000, 0x000f0001 },
4139
4140                 { 0x1408, 0, 0x01c00800, 0x00000000 },
4141                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4142                 { 0x14a8, 0, 0x00000000, 0x000001ff },
4143                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4144                 { 0x14b0, 0, 0x00000002, 0x00000001 },
4145                 { 0x14b8, 0, 0x00000000, 0x00000000 },
4146                 { 0x14c0, 0, 0x00000000, 0x00000009 },
4147                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4148                 { 0x14cc, 0, 0x00000000, 0x00000001 },
4149                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4150
4151                 { 0x1800, 0, 0x00000000, 0x00000001 },
4152                 { 0x1804, 0, 0x00000000, 0x00000003 },
4153
4154                 { 0x2800, 0, 0x00000000, 0x00000001 },
4155                 { 0x2804, 0, 0x00000000, 0x00003f01 },
4156                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4157                 { 0x2810, 0, 0xffff0000, 0x00000000 },
4158                 { 0x2814, 0, 0xffff0000, 0x00000000 },
4159                 { 0x2818, 0, 0xffff0000, 0x00000000 },
4160                 { 0x281c, 0, 0xffff0000, 0x00000000 },
4161                 { 0x2834, 0, 0xffffffff, 0x00000000 },
4162                 { 0x2840, 0, 0x00000000, 0xffffffff },
4163                 { 0x2844, 0, 0x00000000, 0xffffffff },
4164                 { 0x2848, 0, 0xffffffff, 0x00000000 },
4165                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4166
4167                 { 0x2c00, 0, 0x00000000, 0x00000011 },
4168                 { 0x2c04, 0, 0x00000000, 0x00030007 },
4169
4170                 { 0x3c00, 0, 0x00000000, 0x00000001 },
4171                 { 0x3c04, 0, 0x00000000, 0x00070000 },
4172                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4173                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4174                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4175                 { 0x3c14, 0, 0x00000000, 0xffffffff },
4176                 { 0x3c18, 0, 0x00000000, 0xffffffff },
4177                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4178                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4179
4180                 { 0x5004, 0, 0x00000000, 0x0000007f },
4181                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4182
4183                 { 0x5c00, 0, 0x00000000, 0x00000001 },
4184                 { 0x5c04, 0, 0x00000000, 0x0003000f },
4185                 { 0x5c08, 0, 0x00000003, 0x00000000 },
4186                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4187                 { 0x5c10, 0, 0x00000000, 0xffffffff },
4188                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4189                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4190                 { 0x5c88, 0, 0x00000000, 0x00077373 },
4191                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4192
4193                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4194                 { 0x680c, 0, 0xffffffff, 0x00000000 },
4195                 { 0x6810, 0, 0xffffffff, 0x00000000 },
4196                 { 0x6814, 0, 0xffffffff, 0x00000000 },
4197                 { 0x6818, 0, 0xffffffff, 0x00000000 },
4198                 { 0x681c, 0, 0xffffffff, 0x00000000 },
4199                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4200                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4201                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4202                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4203                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4204                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4205                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4206                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4207                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4208                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4209                 { 0x684c, 0, 0xffffffff, 0x00000000 },
4210                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4211                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4212                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4213                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4214                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4215                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4216
4217                 { 0xffff, 0, 0x00000000, 0x00000000 },
4218         };
4219
4220         ret = 0;
4221         is_5709 = 0;
4222         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4223                 is_5709 = 1;
4224
4225         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4226                 u32 offset, rw_mask, ro_mask, save_val, val;
4227                 u16 flags = reg_tbl[i].flags;
4228
4229                 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4230                         continue;
4231
4232                 offset = (u32) reg_tbl[i].offset;
4233                 rw_mask = reg_tbl[i].rw_mask;
4234                 ro_mask = reg_tbl[i].ro_mask;
4235
4236                 save_val = readl(bp->regview + offset);
4237
4238                 writel(0, bp->regview + offset);
4239
4240                 val = readl(bp->regview + offset);
4241                 if ((val & rw_mask) != 0) {
4242                         goto reg_test_err;
4243                 }
4244
4245                 if ((val & ro_mask) != (save_val & ro_mask)) {
4246                         goto reg_test_err;
4247                 }
4248
4249                 writel(0xffffffff, bp->regview + offset);
4250
4251                 val = readl(bp->regview + offset);
4252                 if ((val & rw_mask) != rw_mask) {
4253                         goto reg_test_err;
4254                 }
4255
4256                 if ((val & ro_mask) != (save_val & ro_mask)) {
4257                         goto reg_test_err;
4258                 }
4259
4260                 writel(save_val, bp->regview + offset);
4261                 continue;
4262
4263 reg_test_err:
4264                 writel(save_val, bp->regview + offset);
4265                 ret = -ENODEV;
4266                 break;
4267         }
4268         return ret;
4269 }
4270
4271 static int
4272 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4273 {
4274         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4275                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4276         int i;
4277
4278         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4279                 u32 offset;
4280
4281                 for (offset = 0; offset < size; offset += 4) {
4282
4283                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4284
4285                         if (REG_RD_IND(bp, start + offset) !=
4286                                 test_pattern[i]) {
4287                                 return -ENODEV;
4288                         }
4289                 }
4290         }
4291         return 0;
4292 }
4293
4294 static int
4295 bnx2_test_memory(struct bnx2 *bp)
4296 {
4297         int ret = 0;
4298         int i;
4299         static struct mem_entry {
4300                 u32   offset;
4301                 u32   len;
4302         } mem_tbl_5706[] = {
4303                 { 0x60000,  0x4000 },
4304                 { 0xa0000,  0x3000 },
4305                 { 0xe0000,  0x4000 },
4306                 { 0x120000, 0x4000 },
4307                 { 0x1a0000, 0x4000 },
4308                 { 0x160000, 0x4000 },
4309                 { 0xffffffff, 0    },
4310         },
4311         mem_tbl_5709[] = {
4312                 { 0x60000,  0x4000 },
4313                 { 0xa0000,  0x3000 },
4314                 { 0xe0000,  0x4000 },
4315                 { 0x120000, 0x4000 },
4316                 { 0x1a0000, 0x4000 },
4317                 { 0xffffffff, 0    },
4318         };
4319         struct mem_entry *mem_tbl;
4320
4321         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4322                 mem_tbl = mem_tbl_5709;
4323         else
4324                 mem_tbl = mem_tbl_5706;
4325
4326         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4327                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4328                         mem_tbl[i].len)) != 0) {
4329                         return ret;
4330                 }
4331         }
4332
4333         return ret;
4334 }
4335
4336 #define BNX2_MAC_LOOPBACK       0
4337 #define BNX2_PHY_LOOPBACK       1
4338
/* Send one self-addressed 1514-byte frame through MAC or PHY loopback
 * and verify it comes back intact on the RX ring.  Returns 0 on success,
 * -EINVAL for an unknown mode, -ENOMEM on skb allocation failure, or
 * -ENODEV when the frame is not received or is corrupted.
 * NOTE(review): statement order here (BD setup, doorbell writes, delays,
 * status block reads) mirrors the hardware protocol — do not reorder.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type, then a counting byte pattern in the payload.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (without raising an interrupt) so
	 * we can sample the RX consumer index before transmitting.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Fill one TX buffer descriptor (single-fragment frame). */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell: producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Second forced status block update to pick up TX/RX completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip prepends an l2_fhdr to the received data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4457
4458 #define BNX2_MAC_LOOPBACK_FAILED        1
4459 #define BNX2_PHY_LOOPBACK_FAILED        2
4460 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4461                                          BNX2_PHY_LOOPBACK_FAILED)
4462
4463 static int
4464 bnx2_test_loopback(struct bnx2 *bp)
4465 {
4466         int rc = 0;
4467
4468         if (!netif_running(bp->dev))
4469                 return BNX2_LOOPBACK_FAILED;
4470
4471         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4472         spin_lock_bh(&bp->phy_lock);
4473         bnx2_init_phy(bp);
4474         spin_unlock_bh(&bp->phy_lock);
4475         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4476                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4477         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4478                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4479         return rc;
4480 }
4481
4482 #define NVRAM_SIZE 0x200
4483 #define CRC32_RESIDUAL 0xdebb20e3
4484
4485 static int
4486 bnx2_test_nvram(struct bnx2 *bp)
4487 {
4488         u32 buf[NVRAM_SIZE / 4];
4489         u8 *data = (u8 *) buf;
4490         int rc = 0;
4491         u32 magic, csum;
4492
4493         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4494                 goto test_nvram_done;
4495
4496         magic = be32_to_cpu(buf[0]);
4497         if (magic != 0x669955aa) {
4498                 rc = -ENODEV;
4499                 goto test_nvram_done;
4500         }
4501
4502         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4503                 goto test_nvram_done;
4504
4505         csum = ether_crc_le(0x100, data);
4506         if (csum != CRC32_RESIDUAL) {
4507                 rc = -ENODEV;
4508                 goto test_nvram_done;
4509         }
4510
4511         csum = ether_crc_le(0x100, data + 0x100);
4512         if (csum != CRC32_RESIDUAL) {
4513                 rc = -ENODEV;
4514         }
4515
4516 test_nvram_done:
4517         return rc;
4518 }
4519
4520 static int
4521 bnx2_test_link(struct bnx2 *bp)
4522 {
4523         u32 bmsr;
4524
4525         spin_lock_bh(&bp->phy_lock);
4526         bnx2_enable_bmsr1(bp);
4527         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4528         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4529         bnx2_disable_bmsr1(bp);
4530         spin_unlock_bh(&bp->phy_lock);
4531
4532         if (bmsr & BMSR_LSTATUS) {
4533                 return 0;
4534         }
4535         return -ENODEV;
4536 }
4537
4538 static int
4539 bnx2_test_intr(struct bnx2 *bp)
4540 {
4541         int i;
4542         u16 status_idx;
4543
4544         if (!netif_running(bp->dev))
4545                 return -ENODEV;
4546
4547         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4548
4549         /* This register is not touched during run-time. */
4550         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4551         REG_RD(bp, BNX2_HC_COMMAND);
4552
4553         for (i = 0; i < 10; i++) {
4554                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4555                         status_idx) {
4556
4557                         break;
4558                 }
4559
4560                 msleep_interruptible(10);
4561         }
4562         if (i < 10)
4563                 return 0;
4564
4565         return -ENODEV;
4566 }
4567
/* Periodic SerDes link maintenance for the 5706.  When autoneg is on but
 * the link stays down, falls back to forced 1000/full via parallel
 * detection; when the link later comes up with a config from the partner,
 * re-enables autoneg.  Called from bnx2_timer() context.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* An autoneg attempt is still pending; just count down. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* NOTE(review): 0x1c/0x17/0x15 are vendor-specific
			 * PHY registers; 0x15 is read twice — presumably the
			 * first read returns latched contents.  Confirm
			 * against Broadcom PHY documentation.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner isn't autonegotiating: force
				 * 1000/full and remember we did so.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up in forced mode; if the partner now sends
		 * CONFIG, switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4622
/* Periodic SerDes link maintenance for the 5708 (2.5G-capable PHYs
 * only).  While autoneg is on and the link stays down, alternates
 * between forced 2.5G and autonegotiation until the link comes up.
 * Called from bnx2_timer() context.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to do on PHYs without 2.5G capability. */
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give the current autoneg attempt more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg didn't bring the link up; try forced
			 * 2.5G with a shorter retry interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G didn't work either; go back to
			 * autoneg and allow it two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4652
4653 static void
4654 bnx2_timer(unsigned long data)
4655 {
4656         struct bnx2 *bp = (struct bnx2 *) data;
4657         u32 msg;
4658
4659         if (!netif_running(bp->dev))
4660                 return;
4661
4662         if (atomic_read(&bp->intr_sem) != 0)
4663                 goto bnx2_restart_timer;
4664
4665         msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4666         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4667
4668         bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4669
4670         /* workaround occasional corrupted counters */
4671         if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4672                 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4673                                             BNX2_HC_COMMAND_STATS_NOW);
4674
4675         if (bp->phy_flags & PHY_SERDES_FLAG) {
4676                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4677                         bnx2_5706_serdes_timer(bp);
4678                 else
4679                         bnx2_5708_serdes_timer(bp);
4680         }
4681
4682 bnx2_restart_timer:
4683         mod_timer(&bp->timer, jiffies + bp->current_interval);
4684 }
4685
4686 static int
4687 bnx2_request_irq(struct bnx2 *bp)
4688 {
4689         struct net_device *dev = bp->dev;
4690         int rc = 0;
4691
4692         if (bp->flags & USING_MSI_FLAG) {
4693                 irq_handler_t   fn = bnx2_msi;
4694
4695                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4696                         fn = bnx2_msi_1shot;
4697
4698                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4699         } else
4700                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4701                                  IRQF_SHARED, dev->name, dev);
4702         return rc;
4703 }
4704
4705 static void
4706 bnx2_free_irq(struct bnx2 *bp)
4707 {
4708         struct net_device *dev = bp->dev;
4709
4710         if (bp->flags & USING_MSI_FLAG) {
4711                 free_irq(bp->pdev->irq, dev);
4712                 pci_disable_msi(bp->pdev);
4713                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4714         } else
4715                 free_irq(bp->pdev->irq, dev);
4716 }
4717
/* Called with rtnl_lock */
/* Bring the interface up: allocate rings, enable MSI if available,
 * request the IRQ, initialize the NIC, then verify MSI actually
 * delivers interrupts (falling back to INTx if not).  Each failure
 * path unwinds exactly what was set up before it.  Returns 0 or a
 * negative error code.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Prefer MSI when the device supports it and the module
	 * parameter hasn't disabled it; 5709 uses one-shot MSI.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down MSI (bnx2_free_irq clears the MSI
			 * flags) and redo init + IRQ request with INTx.
			 */
			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4799
4800 static void
4801 bnx2_reset_task(struct work_struct *work)
4802 {
4803         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4804
4805         if (!netif_running(bp->dev))
4806                 return;
4807
4808         bp->in_reset_task = 1;
4809         bnx2_netif_stop(bp);
4810
4811         bnx2_init_nic(bp);
4812
4813         atomic_set(&bp->intr_sem, 1);
4814         bnx2_netif_start(bp);
4815         bp->in_reset_task = 0;
4816 }
4817
4818 static void
4819 bnx2_tx_timeout(struct net_device *dev)
4820 {
4821         struct bnx2 *bp = netdev_priv(dev);
4822
4823         /* This allows the netif to be shutdown gracefully before resetting */
4824         schedule_work(&bp->reset_task);
4825 }
4826
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	/* Install the new VLAN group and reprogram the RX mode while the
	 * device is quiesced.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
4842
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Hard-start transmit: map the skb (head + fragments) into TX buffer
 * descriptors, encode checksum/VLAN/TSO flags, and ring the doorbell.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is unexpectedly
 * full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring filled;
	 * hitting this means the flow-control logic failed.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	/* TSO path: a non-zero gso_size means the stack handed us a
	 * super-frame to be segmented by the hardware.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: encode the TCP header offset (beyond
			 * the fixed IPv6 header) into the BD flag fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Offset is in 8-byte units, split across
				 * three bit fields.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: headers are rewritten below, so a
			 * cloned header must be un-shared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed tot_len/checksum with per-segment values;
			 * the hardware fills in the rest per segment.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First BD covers the linear head and carries the START flag. */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the frame. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when the ring can no longer hold a maximally
	 * fragmented skb; re-wake immediately if bnx2_tx_int() freed
	 * enough space in the meantime (avoids a stop/wake race).
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
4981
/* Called with rtnl_lock */
/* Bring the interface down: wait out any in-flight reset task, stop
 * traffic and the timer, tell the firmware why we're going down (which
 * governs wake-on-LAN arming), then release IRQ, buffers, and memory
 * and drop to D3hot.  Teardown order is deliberate.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Choose the unload reason reported to the firmware. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5013
5014 #define GET_NET_STATS64(ctr)                                    \
5015         (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
5016         (unsigned long) (ctr##_lo)
5017
5018 #define GET_NET_STATS32(ctr)            \
5019         (ctr##_lo)
5020
5021 #if (BITS_PER_LONG == 64)
5022 #define GET_NET_STATS   GET_NET_STATS64
5023 #else
5024 #define GET_NET_STATS   GET_NET_STATS32
5025 #endif
5026
/* Fill and return the cached net_device_stats from the chip's
 * statistics block, translating hardware counter names to the generic
 * netdev fields.  Returns the (possibly stale/zero) cached struct if
 * the statistics block hasn't been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* Packet/byte counters are 64-bit hi/lo pairs; GET_NET_STATS
	 * folds them to unsigned long (truncating on 32-bit hosts).
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* Aggregate RX error count from the categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as 0 on 5706 and 5708 A0 —
	 * presumably the counter is unreliable there; TODO confirm
	 * against the chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Includes frames dropped by firmware (stat_FwRxDrop is kept
	 * fresh by bnx2_timer()).
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5102
5103 /* All ethtool functions called with rtnl_lock */
5104
5105 static int
5106 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5107 {
5108         struct bnx2 *bp = netdev_priv(dev);
5109
5110         cmd->supported = SUPPORTED_Autoneg;
5111         if (bp->phy_flags & PHY_SERDES_FLAG) {
5112                 cmd->supported |= SUPPORTED_1000baseT_Full |
5113                         SUPPORTED_FIBRE;
5114                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5115                         cmd->supported |= SUPPORTED_2500baseX_Full;
5116
5117                 cmd->port = PORT_FIBRE;
5118         }
5119         else {
5120                 cmd->supported |= SUPPORTED_10baseT_Half |
5121                         SUPPORTED_10baseT_Full |
5122                         SUPPORTED_100baseT_Half |
5123                         SUPPORTED_100baseT_Full |
5124                         SUPPORTED_1000baseT_Full |
5125                         SUPPORTED_TP;
5126
5127                 cmd->port = PORT_TP;
5128         }
5129
5130         cmd->advertising = bp->advertising;
5131
5132         if (bp->autoneg & AUTONEG_SPEED) {
5133                 cmd->autoneg = AUTONEG_ENABLE;
5134         }
5135         else {
5136                 cmd->autoneg = AUTONEG_DISABLE;
5137         }
5138
5139         if (netif_carrier_ok(dev)) {
5140                 cmd->speed = bp->line_speed;
5141                 cmd->duplex = bp->duplex;
5142         }
5143         else {
5144                 cmd->speed = -1;
5145                 cmd->duplex = -1;
5146         }
5147
5148         cmd->transceiver = XCVR_INTERNAL;
5149         cmd->phy_address = bp->phy_addr;
5150
5151         return 0;
5152 }
5153
5154 static int
5155 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5156 {
5157         struct bnx2 *bp = netdev_priv(dev);
5158         u8 autoneg = bp->autoneg;
5159         u8 req_duplex = bp->req_duplex;
5160         u16 req_line_speed = bp->req_line_speed;
5161         u32 advertising = bp->advertising;
5162
5163         if (cmd->autoneg == AUTONEG_ENABLE) {
5164                 autoneg |= AUTONEG_SPEED;
5165
5166                 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5167
5168                 /* allow advertising 1 speed */
5169                 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5170                         (cmd->advertising == ADVERTISED_10baseT_Full) ||
5171                         (cmd->advertising == ADVERTISED_100baseT_Half) ||
5172                         (cmd->advertising == ADVERTISED_100baseT_Full)) {
5173
5174                         if (bp->phy_flags & PHY_SERDES_FLAG)
5175                                 return -EINVAL;
5176
5177                         advertising = cmd->advertising;
5178
5179                 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5180                         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5181                                 return -EINVAL;
5182                 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
5183                         advertising = cmd->advertising;
5184                 }
5185                 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5186                         return -EINVAL;
5187                 }
5188                 else {
5189                         if (bp->phy_flags & PHY_SERDES_FLAG) {
5190                                 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5191                         }
5192                         else {
5193                                 advertising = ETHTOOL_ALL_COPPER_SPEED;
5194                         }
5195                 }
5196                 advertising |= ADVERTISED_Autoneg;
5197         }
5198         else {
5199                 if (bp->phy_flags & PHY_SERDES_FLAG) {
5200                         if ((cmd->speed != SPEED_1000 &&
5201                              cmd->speed != SPEED_2500) ||
5202                             (cmd->duplex != DUPLEX_FULL))
5203                                 return -EINVAL;
5204
5205                         if (cmd->speed == SPEED_2500 &&
5206                             !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5207                                 return -EINVAL;
5208                 }
5209                 else if (cmd->speed == SPEED_1000) {
5210                         return -EINVAL;
5211                 }
5212                 autoneg &= ~AUTONEG_SPEED;
5213                 req_line_speed = cmd->speed;
5214                 req_duplex = cmd->duplex;
5215                 advertising = 0;
5216         }
5217
5218         bp->autoneg = autoneg;
5219         bp->advertising = advertising;
5220         bp->req_line_speed = req_line_speed;
5221         bp->req_duplex = req_duplex;
5222
5223         spin_lock_bh(&bp->phy_lock);
5224
5225         bnx2_setup_phy(bp);
5226
5227         spin_unlock_bh(&bp->phy_lock);
5228
5229         return 0;
5230 }
5231
5232 static void
5233 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5234 {
5235         struct bnx2 *bp = netdev_priv(dev);
5236
5237         strcpy(info->driver, DRV_MODULE_NAME);
5238         strcpy(info->version, DRV_MODULE_VERSION);
5239         strcpy(info->bus_info, pci_name(bp->pdev));
5240         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5241         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5242         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5243         info->fw_version[1] = info->fw_version[3] = '.';
5244         info->fw_version[5] = 0;
5245 }
5246
5247 #define BNX2_REGDUMP_LEN                (32 * 1024)
5248
/* ethtool_ops::get_regs_len -- size of the buffer bnx2_get_regs()
 * fills: a fixed 32KB register window.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5254
5255 static void
5256 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5257 {
5258         u32 *p = _p, i, offset;
5259         u8 *orig_p = _p;
5260         struct bnx2 *bp = netdev_priv(dev);
5261         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5262                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5263                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5264                                  0x1040, 0x1048, 0x1080, 0x10a4,
5265                                  0x1400, 0x1490, 0x1498, 0x14f0,
5266                                  0x1500, 0x155c, 0x1580, 0x15dc,
5267                                  0x1600, 0x1658, 0x1680, 0x16d8,
5268                                  0x1800, 0x1820, 0x1840, 0x1854,
5269                                  0x1880, 0x1894, 0x1900, 0x1984,
5270                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5271                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5272                                  0x2000, 0x2030, 0x23c0, 0x2400,
5273                                  0x2800, 0x2820, 0x2830, 0x2850,
5274                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5275                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5276                                  0x4080, 0x4090, 0x43c0, 0x4458,
5277                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5278                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5279                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5280                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5281                                  0x6800, 0x6848, 0x684c, 0x6860,
5282                                  0x6888, 0x6910, 0x8000 };
5283
5284         regs->version = 0;
5285
5286         memset(p, 0, BNX2_REGDUMP_LEN);
5287
5288         if (!netif_running(bp->dev))
5289                 return;
5290
5291         i = 0;
5292         offset = reg_boundaries[0];
5293         p += offset;
5294         while (offset < BNX2_REGDUMP_LEN) {
5295                 *p++ = REG_RD(bp, offset);
5296                 offset += 4;
5297                 if (offset == reg_boundaries[i + 1]) {
5298                         offset = reg_boundaries[i + 2];
5299                         p = (u32 *) (orig_p + offset);
5300                         i += 2;
5301                 }
5302         }
5303 }
5304
5305 static void
5306 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5307 {
5308         struct bnx2 *bp = netdev_priv(dev);
5309
5310         if (bp->flags & NO_WOL_FLAG) {
5311                 wol->supported = 0;
5312                 wol->wolopts = 0;
5313         }
5314         else {
5315                 wol->supported = WAKE_MAGIC;
5316                 if (bp->wol)
5317                         wol->wolopts = WAKE_MAGIC;
5318                 else
5319                         wol->wolopts = 0;
5320         }
5321         memset(&wol->sopass, 0, sizeof(wol->sopass));
5322 }
5323
5324 static int
5325 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5326 {
5327         struct bnx2 *bp = netdev_priv(dev);
5328
5329         if (wol->wolopts & ~WAKE_MAGIC)
5330                 return -EINVAL;
5331
5332         if (wol->wolopts & WAKE_MAGIC) {
5333                 if (bp->flags & NO_WOL_FLAG)
5334                         return -EINVAL;
5335
5336                 bp->wol = 1;
5337         }
5338         else {
5339                 bp->wol = 0;
5340         }
5341         return 0;
5342 }
5343
/* ethtool_ops::nway_reset -- restart autonegotiation.  Only valid when
 * speed autonegotiation is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep: msleep() cannot run
		 * with a BH-disabling spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the driver timer to watch the SerDes autoneg. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation on the PHY. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5378
5379 static int
5380 bnx2_get_eeprom_len(struct net_device *dev)
5381 {
5382         struct bnx2 *bp = netdev_priv(dev);
5383
5384         if (bp->flash_info == NULL)
5385                 return 0;
5386
5387         return (int) bp->flash_size;
5388 }
5389
5390 static int
5391 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5392                 u8 *eebuf)
5393 {
5394         struct bnx2 *bp = netdev_priv(dev);
5395         int rc;
5396
5397         /* parameters already validated in ethtool_get_eeprom */
5398
5399         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5400
5401         return rc;
5402 }
5403
5404 static int
5405 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5406                 u8 *eebuf)
5407 {
5408         struct bnx2 *bp = netdev_priv(dev);
5409         int rc;
5410
5411         /* parameters already validated in ethtool_set_eeprom */
5412
5413         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5414
5415         return rc;
5416 }
5417
5418 static int
5419 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5420 {
5421         struct bnx2 *bp = netdev_priv(dev);
5422
5423         memset(coal, 0, sizeof(struct ethtool_coalesce));
5424
5425         coal->rx_coalesce_usecs = bp->rx_ticks;
5426         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5427         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5428         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5429
5430         coal->tx_coalesce_usecs = bp->tx_ticks;
5431         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5432         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5433         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5434
5435         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5436
5437         return 0;
5438 }
5439
5440 static int
5441 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5442 {
5443         struct bnx2 *bp = netdev_priv(dev);
5444
5445         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5446         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5447
5448         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5449         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5450
5451         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5452         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5453
5454         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5455         if (bp->rx_quick_cons_trip_int > 0xff)
5456                 bp->rx_quick_cons_trip_int = 0xff;
5457
5458         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5459         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5460
5461         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5462         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5463
5464         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5465         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5466
5467         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5468         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5469                 0xff;
5470
5471         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5472         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5473                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5474                         bp->stats_ticks = USEC_PER_SEC;
5475         }
5476         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5477         bp->stats_ticks &= 0xffff00;
5478
5479         if (netif_running(bp->dev)) {
5480                 bnx2_netif_stop(bp);
5481                 bnx2_init_nic(bp);
5482                 bnx2_netif_start(bp);
5483         }
5484
5485         return 0;
5486 }
5487
5488 static void
5489 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5490 {
5491         struct bnx2 *bp = netdev_priv(dev);
5492
5493         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5494         ering->rx_mini_max_pending = 0;
5495         ering->rx_jumbo_max_pending = 0;
5496
5497         ering->rx_pending = bp->rx_ring_size;
5498         ering->rx_mini_pending = 0;
5499         ering->rx_jumbo_pending = 0;
5500
5501         ering->tx_max_pending = MAX_TX_DESC_CNT;
5502         ering->tx_pending = bp->tx_ring_size;
5503 }
5504
5505 static int
5506 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5507 {
5508         struct bnx2 *bp = netdev_priv(dev);
5509
5510         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5511                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5512                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5513
5514                 return -EINVAL;
5515         }
5516         if (netif_running(bp->dev)) {
5517                 bnx2_netif_stop(bp);
5518                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5519                 bnx2_free_skbs(bp);
5520                 bnx2_free_mem(bp);
5521         }
5522
5523         bnx2_set_rx_ring_size(bp, ering->rx_pending);
5524         bp->tx_ring_size = ering->tx_pending;
5525
5526         if (netif_running(bp->dev)) {
5527                 int rc;
5528
5529                 rc = bnx2_alloc_mem(bp);
5530                 if (rc)
5531                         return rc;
5532                 bnx2_init_nic(bp);
5533                 bnx2_netif_start(bp);
5534         }
5535
5536         return 0;
5537 }
5538
5539 static void
5540 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5541 {
5542         struct bnx2 *bp = netdev_priv(dev);
5543
5544         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5545         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5546         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5547 }
5548
5549 static int
5550 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5551 {
5552         struct bnx2 *bp = netdev_priv(dev);
5553
5554         bp->req_flow_ctrl = 0;
5555         if (epause->rx_pause)
5556                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5557         if (epause->tx_pause)
5558                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5559
5560         if (epause->autoneg) {
5561                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5562         }
5563         else {
5564                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5565         }
5566
5567         spin_lock_bh(&bp->phy_lock);
5568
5569         bnx2_setup_phy(bp);
5570
5571         spin_unlock_bh(&bp->phy_lock);
5572
5573         return 0;
5574 }
5575
/* ethtool_ops::get_rx_csum -- report whether rx checksum offload is on. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5583
/* ethtool_ops::set_rx_csum -- enable/disable rx checksum offload.  The
 * flag is only recorded here; the rx path reads bp->rx_csum.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5592
5593 static int
5594 bnx2_set_tso(struct net_device *dev, u32 data)
5595 {
5596         struct bnx2 *bp = netdev_priv(dev);
5597
5598         if (data) {
5599                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5600                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5601                         dev->features |= NETIF_F_TSO6;
5602         } else
5603                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5604                                    NETIF_F_TSO_ECN);
5605         return 0;
5606 }
5607
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  Entry order must match
 * bnx2_stats_offset_arr and the 5706/5708 stats length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5660
/* Convert a statistics_block field offset to a u32 word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter in the hardware statistics block.
 * Entry order must match bnx2_stats_str_arr above.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5711
/* Byte width of each counter, indexed like bnx2_stats_str_arr:
 * 0 = counter skipped, 4 = 32-bit counter, 8 = 64-bit counter.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Same table for 5708 and newer; only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5730
#define BNX2_NUM_TESTS 6

/* Self-test names, in the order bnx2_self_test() fills its result
 * buffer.  "offline" tests disrupt normal operation.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5743
/* ethtool_ops::self_test_count -- number of self-test result slots. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5749
/* ethtool_ops::self_test.  Offline tests reset the chip into
 * diagnostic mode and disrupt traffic; online tests do not.  A
 * non-zero entry in buf[] marks the corresponding test as failed:
 * [0] registers, [1] memory, [2] loopback, [3] nvram,
 * [4] interrupt, [5] link.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diag mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the offline tests. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5805
5806 static void
5807 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5808 {
5809         switch (stringset) {
5810         case ETH_SS_STATS:
5811                 memcpy(buf, bnx2_stats_str_arr,
5812                         sizeof(bnx2_stats_str_arr));
5813                 break;
5814         case ETH_SS_TEST:
5815                 memcpy(buf, bnx2_tests_str_arr,
5816                         sizeof(bnx2_tests_str_arr));
5817                 break;
5818         }
5819 }
5820
/* ethtool_ops::get_stats_count -- number of ethtool statistics. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5826
5827 static void
5828 bnx2_get_ethtool_stats(struct net_device *dev,
5829                 struct ethtool_stats *stats, u64 *buf)
5830 {
5831         struct bnx2 *bp = netdev_priv(dev);
5832         int i;
5833         u32 *hw_stats = (u32 *) bp->stats_blk;
5834         u8 *stats_len_arr = NULL;
5835
5836         if (hw_stats == NULL) {
5837                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5838                 return;
5839         }
5840
5841         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5842             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5843             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5844             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5845                 stats_len_arr = bnx2_5706_stats_len_arr;
5846         else
5847                 stats_len_arr = bnx2_5708_stats_len_arr;
5848
5849         for (i = 0; i < BNX2_NUM_STATS; i++) {
5850                 if (stats_len_arr[i] == 0) {
5851                         /* skip this counter */
5852                         buf[i] = 0;
5853                         continue;
5854                 }
5855                 if (stats_len_arr[i] == 4) {
5856                         /* 4-byte counter */
5857                         buf[i] = (u64)
5858                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5859                         continue;
5860                 }
5861                 /* 8-byte counter */
5862                 buf[i] = (((u64) *(hw_stats +
5863                                         bnx2_stats_offset_arr[i])) << 32) +
5864                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5865         }
5866 }
5867
5868 static int
5869 bnx2_phys_id(struct net_device *dev, u32 data)
5870 {
5871         struct bnx2 *bp = netdev_priv(dev);
5872         int i;
5873         u32 save;
5874
5875         if (data == 0)
5876                 data = 2;
5877
5878         save = REG_RD(bp, BNX2_MISC_CFG);
5879         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5880
5881         for (i = 0; i < (data * 2); i++) {
5882                 if ((i % 2) == 0) {
5883                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5884                 }
5885                 else {
5886                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5887                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5888                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5889                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5890                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5891                                 BNX2_EMAC_LED_TRAFFIC);
5892                 }
5893                 msleep_interruptible(500);
5894                 if (signal_pending(current))
5895                         break;
5896         }
5897         REG_WR(bp, BNX2_EMAC_LED, 0);
5898         REG_WR(bp, BNX2_MISC_CFG, save);
5899         return 0;
5900 }
5901
5902 static int
5903 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5904 {
5905         struct bnx2 *bp = netdev_priv(dev);
5906
5907         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5908                 return (ethtool_op_set_tx_hw_csum(dev, data));
5909         else
5910                 return (ethtool_op_set_tx_csum(dev, data));
5911 }
5912
/* ethtool entry points for the bnx2 driver; all are called with
 * rtnl_lock held (see comment above bnx2_get_settings).
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5948
5949 /* Called with rtnl_lock */
5950 static int
5951 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5952 {
5953         struct mii_ioctl_data *data = if_mii(ifr);
5954         struct bnx2 *bp = netdev_priv(dev);
5955         int err;
5956
5957         switch(cmd) {
5958         case SIOCGMIIPHY:
5959                 data->phy_id = bp->phy_addr;
5960
5961                 /* fallthru */
5962         case SIOCGMIIREG: {
5963                 u32 mii_regval;
5964
5965                 if (!netif_running(dev))
5966                         return -EAGAIN;
5967
5968                 spin_lock_bh(&bp->phy_lock);
5969                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5970                 spin_unlock_bh(&bp->phy_lock);
5971
5972                 data->val_out = mii_regval;
5973
5974                 return err;
5975         }
5976
5977         case SIOCSMIIREG:
5978                 if (!capable(CAP_NET_ADMIN))
5979                         return -EPERM;
5980
5981                 if (!netif_running(dev))
5982                         return -EAGAIN;
5983
5984                 spin_lock_bh(&bp->phy_lock);
5985                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5986                 spin_unlock_bh(&bp->phy_lock);
5987
5988                 return err;
5989
5990         default:
5991                 /* do nothing */
5992                 break;
5993         }
5994         return -EOPNOTSUPP;
5995 }
5996
5997 /* Called with rtnl_lock */
5998 static int
5999 bnx2_change_mac_addr(struct net_device *dev, void *p)
6000 {
6001         struct sockaddr *addr = p;
6002         struct bnx2 *bp = netdev_priv(dev);
6003
6004         if (!is_valid_ether_addr(addr->sa_data))
6005                 return -EINVAL;
6006
6007         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6008         if (netif_running(dev))
6009                 bnx2_set_mac_addr(bp);
6010
6011         return 0;
6012 }
6013
6014 /* Called with rtnl_lock */
6015 static int
6016 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6017 {
6018         struct bnx2 *bp = netdev_priv(dev);
6019
6020         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6021                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6022                 return -EINVAL;
6023
6024         dev->mtu = new_mtu;
6025         if (netif_running(dev)) {
6026                 bnx2_netif_stop(bp);
6027
6028                 bnx2_init_nic(bp);
6029
6030                 bnx2_netif_start(bp);
6031         }
6032         return 0;
6033 }
6034
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: run the interrupt handler by hand with the device IRQ
 * masked, so netconsole/kgdb-over-ethernet can drain the rings even
 * when normal interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Mask the line so the real handler cannot race us. */
	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6046
6047 static void __devinit
6048 bnx2_get_5709_media(struct bnx2 *bp)
6049 {
6050         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6051         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6052         u32 strap;
6053
6054         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6055                 return;
6056         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6057                 bp->phy_flags |= PHY_SERDES_FLAG;
6058                 return;
6059         }
6060
6061         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6062                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6063         else
6064                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6065
6066         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6067                 switch (strap) {
6068                 case 0x4:
6069                 case 0x5:
6070                 case 0x6:
6071                         bp->phy_flags |= PHY_SERDES_FLAG;
6072                         return;
6073                 }
6074         } else {
6075                 switch (strap) {
6076                 case 0x1:
6077                 case 0x2:
6078                 case 0x4:
6079                         bp->phy_flags |= PHY_SERDES_FLAG;
6080                         return;
6081                 }
6082         }
6083 }
6084
6085 static void __devinit
6086 bnx2_get_pci_speed(struct bnx2 *bp)
6087 {
6088         u32 reg;
6089
6090         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6091         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6092                 u32 clkreg;
6093
6094                 bp->flags |= PCIX_FLAG;
6095
6096                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6097
6098                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6099                 switch (clkreg) {
6100                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6101                         bp->bus_speed_mhz = 133;
6102                         break;
6103
6104                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6105                         bp->bus_speed_mhz = 100;
6106                         break;
6107
6108                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6109                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6110                         bp->bus_speed_mhz = 66;
6111                         break;
6112
6113                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6114                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6115                         bp->bus_speed_mhz = 50;
6116                         break;
6117
6118                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6119                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6120                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6121                         bp->bus_speed_mhz = 33;
6122                         break;
6123                 }
6124         }
6125         else {
6126                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6127                         bp->bus_speed_mhz = 66;
6128                 else
6129                         bp->bus_speed_mhz = 33;
6130         }
6131
6132         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6133                 bp->flags |= PCI_32BIT_FLAG;
6134
6135 }
6136
6137 static int __devinit
6138 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6139 {
6140         struct bnx2 *bp;
6141         unsigned long mem_len;
6142         int rc;
6143         u32 reg;
6144         u64 dma_mask, persist_dma_mask;
6145
6146         SET_MODULE_OWNER(dev);
6147         SET_NETDEV_DEV(dev, &pdev->dev);
6148         bp = netdev_priv(dev);
6149
6150         bp->flags = 0;
6151         bp->phy_flags = 0;
6152
6153         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6154         rc = pci_enable_device(pdev);
6155         if (rc) {
6156                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6157                 goto err_out;
6158         }
6159
6160         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6161                 dev_err(&pdev->dev,
6162                         "Cannot find PCI device base address, aborting.\n");
6163                 rc = -ENODEV;
6164                 goto err_out_disable;
6165         }
6166
6167         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6168         if (rc) {
6169                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6170                 goto err_out_disable;
6171         }
6172
6173         pci_set_master(pdev);
6174
6175         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6176         if (bp->pm_cap == 0) {
6177                 dev_err(&pdev->dev,
6178                         "Cannot find power management capability, aborting.\n");
6179                 rc = -EIO;
6180                 goto err_out_release;
6181         }
6182
6183         bp->dev = dev;
6184         bp->pdev = pdev;
6185
6186         spin_lock_init(&bp->phy_lock);
6187         spin_lock_init(&bp->indirect_lock);
6188         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6189
6190         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6191         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6192         dev->mem_end = dev->mem_start + mem_len;
6193         dev->irq = pdev->irq;
6194
6195         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6196
6197         if (!bp->regview) {