0300a759728c782a049643f57e1bb29ca8148005
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.0"
60 #define DRV_MODULE_RELDATE      "December 11, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static const char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board types; used as the driver_data index into board_info[] below
 * and in the bnx2_pci_tbl[] device table.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
/* indexed by board_t, above */
static const struct {
	char *name;	/* human-readable board name for probe messages */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* Supported PCI devices.  The HP OEM entries match on specific
 * subsystem IDs and must precede the PCI_ANY_ID catch-all entries for
 * the same device ID; the last field is the board_t driver_data.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* NVRAM device table.  The first field of each entry is matched
 * against the chip's flash strapping to identify the attached part;
 * the remaining fields are controller configuration words, access
 * flags, page geometry, byte-address mask, total size, and a name
 * used in log messages.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
/* 5709 chips have a fixed on-chip NVRAM interface, so a single spec
 * is used instead of the strap-matched flash_table[] above.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
/* Return the number of free TX descriptors.
 *
 * The smp_mb() orders the reads of tx_prod/tx_cons against prior ring
 * updates on other CPUs before computing the fill level.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bnapi->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		/* Indices are 16-bit; mask off wrap-around. */
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Write @val to a device register indirectly through the PCI config
 * window (address first, then data), serialized by bp->indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register @reg via the EMAC MDIO interface.
 *
 * If hardware auto-polling of the PHY is enabled, it is temporarily
 * turned off for the duration of the transaction and re-enabled before
 * returning (the read-back of BNX2_EMAC_MDIO_MODE flushes the write).
 *
 * Returns 0 and stores the 16-bit register value in *val, or -EBUSY
 * (with *val set to 0) if the transaction does not complete within
 * ~500us (50 polls x 10us).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Command word: PHY address, register number, and START_BUSY
	 * to kick off the read.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: START_BUSY clears when the read is done. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
349
/* Write @val to PHY register @reg via the EMAC MDIO interface.
 *
 * As in bnx2_read_phy(), hardware auto-polling is suspended around
 * the transaction and restored afterwards.
 *
 * Returns 0 on success, or -EBUSY if the transaction does not
 * complete within ~500us (50 polls x 10us).
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Command word: PHY address, register number, write data, and
	 * START_BUSY to kick off the write.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
398
/* Mask chip interrupts; the read-back flushes the posted write so the
 * mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask chip interrupts and force a coalescing event (COAL_NOW) so
 * any status-block update that arrived while masked raises an
 * interrupt immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	/* First ack up to last_status_idx with interrupts still
	 * masked...
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bnapi->last_status_idx);

	/* ...then unmask. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
421
/* Disable interrupts and wait for any in-flight handler to finish.
 *
 * intr_sem is bumped before masking so the ISR sees interrupts as
 * blocked; synchronize_irq() then guarantees no handler is running
 * when we return.  Paired with bnx2_netif_start(), which decrements
 * intr_sem.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
429
430 static void
431 bnx2_napi_disable(struct bnx2 *bp)
432 {
433         napi_disable(&bp->bnx2_napi.napi);
434 }
435
436 static void
437 bnx2_napi_enable(struct bnx2 *bp)
438 {
439         napi_enable(&bp->bnx2_napi.napi);
440 }
441
442 static void
443 bnx2_netif_stop(struct bnx2 *bp)
444 {
445         bnx2_disable_int_sync(bp);
446         if (netif_running(bp->dev)) {
447                 bnx2_napi_disable(bp);
448                 netif_tx_disable(bp->dev);
449                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
450         }
451 }
452
453 static void
454 bnx2_netif_start(struct bnx2 *bp)
455 {
456         if (atomic_dec_and_test(&bp->intr_sem)) {
457                 if (netif_running(bp->dev)) {
458                         netif_wake_queue(bp->dev);
459                         bnx2_napi_enable(bp);
460                         bnx2_enable_int(bp);
461                 }
462         }
463 }
464
465 static void
466 bnx2_free_mem(struct bnx2 *bp)
467 {
468         int i;
469
470         for (i = 0; i < bp->ctx_pages; i++) {
471                 if (bp->ctx_blk[i]) {
472                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
473                                             bp->ctx_blk[i],
474                                             bp->ctx_blk_mapping[i]);
475                         bp->ctx_blk[i] = NULL;
476                 }
477         }
478         if (bp->status_blk) {
479                 pci_free_consistent(bp->pdev, bp->status_stats_size,
480                                     bp->status_blk, bp->status_blk_mapping);
481                 bp->status_blk = NULL;
482                 bp->stats_blk = NULL;
483         }
484         if (bp->tx_desc_ring) {
485                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
486                                     bp->tx_desc_ring, bp->tx_desc_mapping);
487                 bp->tx_desc_ring = NULL;
488         }
489         kfree(bp->tx_buf_ring);
490         bp->tx_buf_ring = NULL;
491         for (i = 0; i < bp->rx_max_ring; i++) {
492                 if (bp->rx_desc_ring[i])
493                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
494                                             bp->rx_desc_ring[i],
495                                             bp->rx_desc_mapping[i]);
496                 bp->rx_desc_ring[i] = NULL;
497         }
498         vfree(bp->rx_buf_ring);
499         bp->rx_buf_ring = NULL;
500         for (i = 0; i < bp->rx_max_pg_ring; i++) {
501                 if (bp->rx_pg_desc_ring[i])
502                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
503                                             bp->rx_pg_desc_ring[i],
504                                             bp->rx_pg_desc_mapping[i]);
505                 bp->rx_pg_desc_ring[i] = NULL;
506         }
507         if (bp->rx_pg_ring)
508                 vfree(bp->rx_pg_ring);
509         bp->rx_pg_ring = NULL;
510 }
511
/* Allocate all host rings, the combined status/statistics block, and
 * (on 5709) the context memory pages.
 *
 * Returns 0 on success.  On any failure, everything allocated so far
 * is released via bnx2_free_mem() and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Optional page ring, allocated only when configured. */
	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->bnx2_napi.status_blk = bp->status_blk;

	/* Statistics block starts right after the cache-aligned status
	 * block within the same DMA allocation.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of context memory, in page-sized chunks
		 * (at least one page).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
597
/* Report the current link state to the firmware via shared memory.
 *
 * Skipped when the PHY is managed remotely (REMOTE_PHY_CAP_FLAG).
 * Speeds other than 10/100/1000/2500 leave the speed bits at 0.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice; presumably to clear the
			 * latched state and get the current value --
			 * NOTE(review): standard MII latched-bit
			 * behavior, confirm against PHY datasheet.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
656
657 static char *
658 bnx2_xceiver_str(struct bnx2 *bp)
659 {
660         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
661                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
662                  "Copper"));
663 }
664
/* Log the link state and update the netdev carrier flag, then push
 * the new state to the firmware.
 *
 * The first printk() carries the log level; the bare printk() calls
 * that follow continue the same console line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
701
/* Derive bp->flow_ctrl from the forced or autonegotiated pause
 * settings.
 *
 * If pause is not fully autonegotiated, the requested setting is
 * applied directly (full duplex only).  Otherwise the local and
 * remote pause advertisements are resolved per IEEE 802.3
 * Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause only applies at full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports the resolved pause state directly in
	 * the 1000X status register.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map 1000BASE-X pause bits onto the common PAUSE_CAP/ASYM
	 * encoding so one resolution path below handles both media.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
777
778 static int
779 bnx2_5709s_linkup(struct bnx2 *bp)
780 {
781         u32 val, speed;
782
783         bp->link_up = 1;
784
785         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
786         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
787         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
788
789         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
790                 bp->line_speed = bp->req_line_speed;
791                 bp->duplex = bp->req_duplex;
792                 return 0;
793         }
794         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
795         switch (speed) {
796                 case MII_BNX2_GP_TOP_AN_SPEED_10:
797                         bp->line_speed = SPEED_10;
798                         break;
799                 case MII_BNX2_GP_TOP_AN_SPEED_100:
800                         bp->line_speed = SPEED_100;
801                         break;
802                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
803                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
804                         bp->line_speed = SPEED_1000;
805                         break;
806                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
807                         bp->line_speed = SPEED_2500;
808                         break;
809         }
810         if (val & MII_BNX2_GP_TOP_AN_FD)
811                 bp->duplex = DUPLEX_FULL;
812         else
813                 bp->duplex = DUPLEX_HALF;
814         return 0;
815 }
816
/* Fill in bp->line_speed and bp->duplex after the 5708 SerDes PHY has
 * reported link up, using the BCM5708S 1000X status #1 register.
 * Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
845
/* Fill in link parameters for the 5706 SerDes PHY.  The 5706S only
 * runs at 1 Gb/s, so only duplex needs to be resolved: from BMCR when
 * forced, or from the intersection of local/remote 1000X advertisement
 * when autonegotiating.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	/* Start from the forced duplex in BMCR... */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	/* ...then, if autoneg is on, refine it from the 1000X ability
	 * bits both sides advertised.
	 */
	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
882
/* Resolve bp->line_speed and bp->duplex for a copper PHY after link up.
 * With autoneg enabled, the highest common denominator of the local and
 * partner advertisements wins (1000 first, then 100/10); with autoneg
 * disabled, speed/duplex are taken directly from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Gigabit ability: MII_CTRL1000 holds what we advertise,
		 * MII_STAT1000 what the partner advertises.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The LP 1000BASE-T ability bits in STAT1000 sit two bit
		 * positions above the corresponding CTRL1000 advertise
		 * bits, hence the >> 2 before intersecting.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to the 10/100
			 * advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode BMCR directly. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
948
/* Program the EMAC to match the resolved link parameters: TX timing,
 * port mode (MII/GMII/2.5G), duplex and RX/TX pause enables.  Finishes
 * by acking the EMAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX_LENGTHS slot-time/IPG value; 0x26ff is presumably the
	 * extended IPG needed for 1G half duplex — confirm against the
	 * chip documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII port mode plus the 25G bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1015
1016 static void
1017 bnx2_enable_bmsr1(struct bnx2 *bp)
1018 {
1019         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1020             (CHIP_NUM(bp) == CHIP_NUM_5709))
1021                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1022                                MII_BNX2_BLK_ADDR_GP_STATUS);
1023 }
1024
1025 static void
1026 bnx2_disable_bmsr1(struct bnx2 *bp)
1027 {
1028         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1029             (CHIP_NUM(bp) == CHIP_NUM_5709))
1030                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1031                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1032 }
1033
/* Enable 2.5G advertisement in the PHY's UP1 register if not already
 * set.  On the 5709 the UP1 register lives in the OVER1G block, which
 * must be selected first and deselected afterwards.
 *
 * Returns 1 if 2.5G was already enabled, 0 if it had to be enabled
 * here or if the PHY is not 2.5G capable (callers use a 0 return to
 * force the link down so the change takes effect).
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1062
/* Disable 2.5G advertisement in the PHY's UP1 register if currently
 * set.  On the 5709 the UP1 register lives in the OVER1G block, which
 * must be selected first and deselected afterwards.
 *
 * Returns 1 if 2.5G was enabled and had to be cleared here, 0 if it
 * was already off or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1088
/* Force the SerDes PHY to 2.5G operation.  On the 5709 this is done
 * via the SERDES_DIG MISC1 register; on the 5708 via a vendor bit in
 * BMCR.  Autoneg is turned off in BMCR when the driver is configured
 * for forced speed resolution.
 *
 * NOTE(review): bmcr is only initialized on the 5709/5708 paths;
 * presumably PHY_2_5G_CAPABLE_FLAG is set only on those chips so the
 * final write never sees an uninitialized value — confirm.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Set the FORCE and FORCE_2_5G bits in the SERDES_DIG
		 * block's MISC1 register, then restore the default block.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1123
/* Undo a forced 2.5G configuration and, if speed autoneg is enabled,
 * restart autonegotiation at 1G.  Mirrors bnx2_enable_forced_2g5():
 * MISC1 on the 5709, a vendor BMCR bit on the 5708.
 *
 * NOTE(review): as in bnx2_enable_forced_2g5(), bmcr is initialized
 * only on the 5709/5708 paths — presumably the capability flag limits
 * callers to those chips; confirm.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the FORCE bit in the SERDES_DIG block, then
		 * restore the default register block.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1154
/* Poll the PHY link state, resolve speed/duplex/flow-control through
 * the chip-specific linkup helpers, report link changes, and program
 * the MAC accordingly.  Skipped entirely in loopback modes (link is
 * forced up) and for firmware-managed remote PHYs.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read it twice so the second
	 * read reflects the current state rather than a stale latch.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On the 5706 SerDes, trust the EMAC link indication over the
	 * PHY's BMSR bit (chip-specific workaround).
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G so autoneg can restart. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1220
1221 static int
1222 bnx2_reset_phy(struct bnx2 *bp)
1223 {
1224         int i;
1225         u32 reg;
1226
1227         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1228
1229 #define PHY_RESET_MAX_WAIT 100
1230         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1231                 udelay(10);
1232
1233                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1234                 if (!(reg & BMCR_RESET)) {
1235                         udelay(20);
1236                         break;
1237                 }
1238         }
1239         if (i == PHY_RESET_MAX_WAIT) {
1240                 return -EBUSY;
1241         }
1242         return 0;
1243 }
1244
1245 static u32
1246 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1247 {
1248         u32 adv = 0;
1249
1250         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1251                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1252
1253                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254                         adv = ADVERTISE_1000XPAUSE;
1255                 }
1256                 else {
1257                         adv = ADVERTISE_PAUSE_CAP;
1258                 }
1259         }
1260         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1261                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1262                         adv = ADVERTISE_1000XPSE_ASYM;
1263                 }
1264                 else {
1265                         adv = ADVERTISE_PAUSE_ASYM;
1266                 }
1267         }
1268         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1269                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1270                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1271                 }
1272                 else {
1273                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1274                 }
1275         }
1276         return adv;
1277 }
1278
1279 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1280
1281 static int
1282 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1283 {
1284         u32 speed_arg = 0, pause_adv;
1285
1286         pause_adv = bnx2_phy_get_pause_adv(bp);
1287
1288         if (bp->autoneg & AUTONEG_SPEED) {
1289                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1290                 if (bp->advertising & ADVERTISED_10baseT_Half)
1291                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1292                 if (bp->advertising & ADVERTISED_10baseT_Full)
1293                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1294                 if (bp->advertising & ADVERTISED_100baseT_Half)
1295                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296                 if (bp->advertising & ADVERTISED_100baseT_Full)
1297                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1298                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1299                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1300                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1301                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1302         } else {
1303                 if (bp->req_line_speed == SPEED_2500)
1304                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1305                 else if (bp->req_line_speed == SPEED_1000)
1306                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1307                 else if (bp->req_line_speed == SPEED_100) {
1308                         if (bp->req_duplex == DUPLEX_FULL)
1309                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1310                         else
1311                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1312                 } else if (bp->req_line_speed == SPEED_10) {
1313                         if (bp->req_duplex == DUPLEX_FULL)
1314                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1315                         else
1316                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1317                 }
1318         }
1319
1320         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1321                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1322         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1323                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1324
1325         if (port == PORT_TP)
1326                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1327                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1328
1329         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1330
1331         spin_unlock_bh(&bp->phy_lock);
1332         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1333         spin_lock_bh(&bp->phy_lock);
1334
1335         return 0;
1336 }
1337
/* Configure the SerDes PHY for the requested link settings.  Remote
 * PHYs are delegated to bnx2_setup_remote_phy().  Forced-speed setups
 * program BMCR/advertisement directly (forcing a visible link bounce
 * when the configuration changes); autoneg setups write the new 1000X
 * advertisement and restart autonegotiation.  Called with phy_lock
 * held (dropped briefly around the msleep).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G advertisement requires a link
		 * bounce for the change to take effect.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is presumably a
				 * 5709-specific BMCR speed bit — confirm.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just re-resolve flow control
			 * and reprogram the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1452
/* All fibre speeds this board can advertise; includes 2.5G only when
 * the PHY is 2.5G capable.  Evaluates 'bp' from the enclosing scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds (ethtool ADVERTISED_* encoding). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
	ADVERTISED_1000baseT_Full)

/* All 10/100 abilities in MII advertisement-register encoding. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* All 1000BASE-T abilities in MII_CTRL1000 encoding. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1467
/* Initialize the driver's link request fields (autoneg, advertising,
 * req_line_speed, req_duplex) from the firmware's default link word in
 * shared memory for a remote PHY, selecting the copper or SerDes word
 * by the current phy_port.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg default: translate each firmware speed bit
		 * into the matching ethtool advertisement bit.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced default: later checks override earlier ones,
		 * so the highest speed bit present wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1514
/* Set the driver's default link request: remote PHYs take firmware
 * defaults; otherwise default to full autoneg, except that SerDes
 * boards whose hardware config forces 1G get a forced 1000/full setup.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* Honor a hardware-config default of forced 1G. */
		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1538
1539 static void
1540 bnx2_send_heart_beat(struct bnx2 *bp)
1541 {
1542         u32 msg;
1543         u32 addr;
1544
1545         spin_lock(&bp->indirect_lock);
1546         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1547         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1548         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1549         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1550         spin_unlock(&bp->indirect_lock);
1551 }
1552
/* Decode a remote-PHY link event from the LINK_STATUS shared-memory
 * word: update link state, speed/duplex, flow control and phy_port,
 * report any link change, and reprogram the MAC.  A heartbeat is sent
 * first if the firmware flagged the driver pulse as expired.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets duplex then deliberately falls
		 * through to the matching FULL case to set the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Unless both speed and flow-control autoneg are on,
		 * honor the requested flow control (full duplex only);
		 * otherwise take the negotiated result from firmware.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If firmware switched media, reload the link defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1631
1632 static int
1633 bnx2_set_remote_link(struct bnx2 *bp)
1634 {
1635         u32 evt_code;
1636
1637         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1638         switch (evt_code) {
1639                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1640                         bnx2_remote_phy_event(bp);
1641                         break;
1642                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1643                 default:
1644                         bnx2_send_heart_beat(bp);
1645                         break;
1646         }
1647         return 0;
1648 }
1649
/* Configure the copper PHY per the current autoneg/speed/duplex request.
 * Called with bp->phy_lock held (the lock is dropped briefly around the
 * forced link-down delay below).
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Keep only the speed and pause bits of the current
                 * 10/100 advertisement for the comparison below.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Build the desired advertisement from bp->advertising. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Rewrite the advertisement and (re)start autonegotiation
                 * only when something actually changed or autoneg was off.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR link status is latched-low; read it twice to get
                 * the current state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        /* Drop the lock while sleeping; reacquire after. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1746
1747 static int
1748 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1749 {
1750         if (bp->loopback == MAC_LOOPBACK)
1751                 return 0;
1752
1753         if (bp->phy_flags & PHY_SERDES_FLAG) {
1754                 return (bnx2_setup_serdes_phy(bp, port));
1755         }
1756         else {
1757                 return (bnx2_setup_copper_phy(bp));
1758         }
1759 }
1760
/* One-time init of the 5709 SerDes PHY.  The 5709S places the standard
 * IEEE MII registers at a 0x10 offset and groups the rest of its
 * registers into blocks selected via MII_BNX2_BLK_ADDR, so the register
 * map is set up first and the PHY is then programmed block by block.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
        u32 val;

        /* Remap the standard MII registers to their 5709S offsets. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Select the autoneg MMD through the address expansion register. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        bnx2_reset_phy(bp);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        /* Select fiber mode explicitly instead of auto-detection. */
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Advertise 2.5G only when the hardware is capable of it. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable Broadcom autoneg mode (BAM) next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address pointing at the IEEE combo block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
1809
/* One-time init of the 5708 SerDes PHY: select fiber mode with
 * auto-detect, enable PLL early link detect, advertise 2.5G when
 * capable, and apply revision- and board-specific TX tuning.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
        u32 val;

        bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        /* Use the IEEE register format in the digital-3 block. */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Advertise 2.5G when the hardware supports it. */
        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Apply the NVRAM-configured TXCTL3 value, but only on
         * backplane boards.
         */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = REG_RD_IND(bp, bp->shmem_base +
                                          BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
1867
1868 static int
1869 bnx2_init_5706s_phy(struct bnx2 *bp)
1870 {
1871         bnx2_reset_phy(bp);
1872
1873         bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1874
1875         if (CHIP_NUM(bp) == CHIP_NUM_5706)
1876                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1877
1878         if (bp->dev->mtu > 1500) {
1879                 u32 val;
1880
1881                 /* Set extended packet length bit */
1882                 bnx2_write_phy(bp, 0x18, 0x7);
1883                 bnx2_read_phy(bp, 0x18, &val);
1884                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1885
1886                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1887                 bnx2_read_phy(bp, 0x1c, &val);
1888                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1889         }
1890         else {
1891                 u32 val;
1892
1893                 bnx2_write_phy(bp, 0x18, 0x7);
1894                 bnx2_read_phy(bp, 0x18, &val);
1895                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1896
1897                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1898                 bnx2_read_phy(bp, 0x1c, &val);
1899                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1900         }
1901
1902         return 0;
1903 }
1904
/* One-time init of the copper (TP) PHY: apply errata workarounds when
 * flagged, adjust the extended packet length setting for the current
 * MTU, and enable ethernet@wirespeed.  The raw 0x10/0x15/0x17/0x18
 * accesses program Broadcom vendor shadow/DSP registers; the constants
 * come from Broadcom and should be treated as opaque.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
        u32 val;

        bnx2_reset_phy(bp);

        if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
                /* CRC errata fix: DSP writes via the 0x17 (address) /
                 * 0x15 (data) register pair.
                 */
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
                /* Clear bit 8 of the DSP expansion register to disable
                 * early DAC.
                 */
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear the extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
1955
1956
1957 static int
1958 bnx2_init_phy(struct bnx2 *bp)
1959 {
1960         u32 val;
1961         int rc = 0;
1962
1963         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1964         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1965
1966         bp->mii_bmcr = MII_BMCR;
1967         bp->mii_bmsr = MII_BMSR;
1968         bp->mii_bmsr1 = MII_BMSR;
1969         bp->mii_adv = MII_ADVERTISE;
1970         bp->mii_lpa = MII_LPA;
1971
1972         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1973
1974         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1975                 goto setup_phy;
1976
1977         bnx2_read_phy(bp, MII_PHYSID1, &val);
1978         bp->phy_id = val << 16;
1979         bnx2_read_phy(bp, MII_PHYSID2, &val);
1980         bp->phy_id |= val & 0xffff;
1981
1982         if (bp->phy_flags & PHY_SERDES_FLAG) {
1983                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1984                         rc = bnx2_init_5706s_phy(bp);
1985                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1986                         rc = bnx2_init_5708s_phy(bp);
1987                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1988                         rc = bnx2_init_5709s_phy(bp);
1989         }
1990         else {
1991                 rc = bnx2_init_copper_phy(bp);
1992         }
1993
1994 setup_phy:
1995         if (!rc)
1996                 rc = bnx2_setup_phy(bp, bp->phy_port);
1997
1998         return rc;
1999 }
2000
2001 static int
2002 bnx2_set_mac_loopback(struct bnx2 *bp)
2003 {
2004         u32 mac_mode;
2005
2006         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2007         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2008         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2009         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010         bp->link_up = 1;
2011         return 0;
2012 }
2013
2014 static int bnx2_test_link(struct bnx2 *);
2015
2016 static int
2017 bnx2_set_phy_loopback(struct bnx2 *bp)
2018 {
2019         u32 mac_mode;
2020         int rc, i;
2021
2022         spin_lock_bh(&bp->phy_lock);
2023         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2024                             BMCR_SPEED1000);
2025         spin_unlock_bh(&bp->phy_lock);
2026         if (rc)
2027                 return rc;
2028
2029         for (i = 0; i < 10; i++) {
2030                 if (bnx2_test_link(bp) == 0)
2031                         break;
2032                 msleep(100);
2033         }
2034
2035         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2036         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2037                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2038                       BNX2_EMAC_MODE_25G_MODE);
2039
2040         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2041         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2042         bp->link_up = 1;
2043         return 0;
2044 }
2045
/* Post @msg_data in the driver mailbox and wait up to FW_ACK_TIME_OUT_MS
 * for the bootcode to echo the sequence number back in the firmware
 * mailbox.  Returns 0 on success (or when the message is a WAIT0 type
 * that needs no status check), -EBUSY on ack timeout, -EIO when the
 * firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
        int i;
        u32 val;

        /* Each message carries a fresh sequence number so the ack can
         * be matched to this particular request.
         */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require a completion status check. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2088
/* Point the 5709's context block at the host-resident context pages
 * and wait for each page-table write to be latched by hardware.
 * Returns 0 on success or -EBUSY when the hardware does not complete
 * within the polling budget.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Enable the block, start memory init, and encode the host
         * page size (log2(page) - 8) in bits 16 and up.
         */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* MEM_INIT self-clears when initialization completes. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Program the 64-bit DMA address of context page i into
                 * the host page table, low half (plus valid bit) first.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* WRITE_REQ self-clears once the entry is latched. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2131
/* Zero out the on-chip context memory for all 96 connection IDs (used
 * on chips without host-resident context).  On 5706 A0 half of each
 * group of 16 CIDs is remapped to a different physical CID before the
 * zeroing -- an A0 addressing workaround.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        vcid_addr = GET_PCID_ADDR(vcid);
                        /* CIDs with bit 3 set map to a different
                         * physical CID on A0 silicon.
                         */
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* Each CID context spans CTX_SIZE bytes and is zeroed
                 * through PHY_CTX_SIZE-sized windows.
                 */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                CTX_WR(bp, vcid_addr, offset, 0);
                }
        }
}
2174
/* Work around bad on-chip RX buffer memory: allocate every buffer from
 * the RX mbuf pool, record the good ones (bit 9 of the handle clear),
 * then free only those back -- the bad buffers stay allocated forever
 * and are thus never handed to the RX path.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        /* 512-entry scratch array for good buffer handles.
         * NOTE(review): assumes the pool never yields more than 512
         * good buffers -- confirm against the chip's RBUF pool size.
         */
        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                /* Free-command encoding: handle replicated in both
                 * halves plus the low valid bit.  NOTE(review): format
                 * taken as-is from the original Broadcom code.
                 */
                val = (val << 9) | val | 1;

                REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
2225
2226 static void
2227 bnx2_set_mac_addr(struct bnx2 *bp)
2228 {
2229         u32 val;
2230         u8 *mac_addr = bp->dev->dev_addr;
2231
2232         val = (mac_addr[0] << 8) | mac_addr[1];
2233
2234         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2235
2236         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2237                 (mac_addr[4] << 8) | mac_addr[5];
2238
2239         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2240 }
2241
/* Allocate a page for page-ring slot @index, DMA-map it, and publish
 * its bus address in the matching rx_bd.  Returns 0 or -ENOMEM.
 * NOTE(review): the pci_map_page() result is not checked for a mapping
 * failure -- confirm whether that is acceptable on supported platforms.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
{
        dma_addr_t mapping;
        struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
        struct rx_bd *rxbd =
                &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return -ENOMEM;
        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        rx_pg->page = page;
        pci_unmap_addr_set(rx_pg, mapping, mapping);
        /* Hardware takes the address as two 32-bit halves. */
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        return 0;
}
2261
2262 static void
2263 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2264 {
2265         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2266         struct page *page = rx_pg->page;
2267
2268         if (!page)
2269                 return;
2270
2271         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2272                        PCI_DMA_FROMDEVICE);
2273
2274         __free_page(page);
2275         rx_pg->page = NULL;
2276 }
2277
/* Allocate and DMA-map an skb for RX ring slot @index and publish its
 * bus address in the matching rx_bd.  Advances rx_prod_bseq on
 * success.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        /* Align skb->data to a BNX2_RX_ALIGN boundary. */
        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        /* Hardware takes the buffer address as two 32-bit halves. */
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        bp->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}
2308
2309 static int
2310 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2311 {
2312         struct status_block *sblk = bnapi->status_blk;
2313         u32 new_link_state, old_link_state;
2314         int is_set = 1;
2315
2316         new_link_state = sblk->status_attn_bits & event;
2317         old_link_state = sblk->status_attn_bits_ack & event;
2318         if (new_link_state != old_link_state) {
2319                 if (new_link_state)
2320                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2321                 else
2322                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2323         } else
2324                 is_set = 0;
2325
2326         return is_set;
2327 }
2328
/* Handle PHY-related attention bits reported in the status block. */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        /* Local PHY link state change: re-resolve under phy_lock. */
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
                spin_lock(&bp->phy_lock);
                bnx2_set_link(bp);
                spin_unlock(&bp->phy_lock);
        }
        /* The timer-abort attention bit is used to signal events for a
         * firmware-managed (remote) PHY.
         */
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

}
2341
2342 static inline u16
2343 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2344 {
2345         u16 cons;
2346
2347         cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2348
2349         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2350                 cons++;
2351         return cons;
2352 }
2353
/* Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap and free the skbs, then wake the TX queue if it was stopped
 * and enough descriptors are now free.  Runs in NAPI context.
 */
static void
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_free_bd = 0;

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = bnapi->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &bp->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        /* Account for the skipped "next page" BD when
                         * the packet wraps the ring.
                         */
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Stop when the packet's final BD has not yet
                         * completed (signed sequence comparison).
                         */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* Unmap each page fragment's BD. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                tx_free_bd += last + 1;

                dev_kfree_skb(skb);

                /* Pick up completions that arrived while freeing. */
                hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        bnapi->hw_tx_cons = hw_cons;
        bnapi->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
                     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
                /* Re-check under the TX lock to avoid racing with
                 * bnx2_start_xmit() stopping the queue.
                 */
                netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
}
2433
/* Recycle @count page-ring entries from the consumer side back to the
 * producer side without allocating fresh pages.  When @skb is given,
 * its last page fragment is detached, remapped, and returned to the
 * ring before the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        dma_addr_t mapping;
        int i;
        u16 hw_prod = bp->rx_pg_prod, prod;
        u16 cons = bp->rx_pg_cons;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &bp->rx_pg_ring[prod];
                cons_rx_pg = &bp->rx_pg_ring[cons];
                cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                if (i == 0 && skb) {
                        struct page *page;
                        struct skb_shared_info *shinfo;

                        /* Steal the skb's last page fragment and hand it
                         * back to the consumer slot.
                         */
                        shinfo = skb_shinfo(skb);
                        shinfo->nr_frags--;
                        page = shinfo->frags[shinfo->nr_frags].page;
                        shinfo->frags[shinfo->nr_frags].page = NULL;
                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                        cons_rx_pg->page = page;
                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
                        dev_kfree_skb(skb);
                }
                if (prod != cons) {
                        /* Move page ownership, DMA mapping, and bus
                         * address from the consumer to the producer slot.
                         */
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        bp->rx_pg_prod = hw_prod;
        bp->rx_pg_cons = cons;
}
2482
/* Give the rx buffer at ring index "cons" back to the hardware at ring
 * index "prod" so it can be reused.  Called when a packet is dropped
 * (receive error, copy/allocation failure) without losing the buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Return the region synced for the CPU in bnx2_rx_int() back to
	 * the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and BD address are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2512
/* Finish receiving a packet held in "skb" (and, for split/jumbo frames,
 * in rx page-ring entries): allocate a replacement rx buffer for slot
 * "prod" and attach the page-ring fragments to the skb.
 *
 * @len:      packet length with the caller's 4-byte trailer adjustment
 *            already applied (bnx2_rx_int() does "len -= 4")
 * @hdr_len:  0 for a fully linear packet; otherwise the number of bytes
 *            in the linear part, the rest being in the page ring
 * @ring_idx: packed (cons << 16) | prod for the rx buffer ring
 *
 * Returns 0 on success.  On allocation failure the buffers are recycled
 * via bnx2_reuse_rx_skb()/bnx2_reuse_rx_skb_pages() and an error is
 * returned; the packet is dropped.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		/* No replacement buffer: recycle the old one... */
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* ...and every page-ring entry the packet used.
			 * len + 4 restores the raw on-wire length the
			 * caller stripped.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bp->rx_pg_cons;
		u16 pg_prod = bp->rx_pg_prod;

		/* Bytes past the header, including the 4-byte trailer
		 * which is trimmed from the last fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* This fragment holds nothing but (part
				 * of) the 4-byte trailer: trim the skb
				 * instead of attaching a page, and put
				 * the remaining pages back on the ring.
				 */
				unsigned int tail = 4 - frag_len;

				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Last fragment carries the 4-byte trailer; drop it. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Cannot refill the ring: recycle the
				 * remaining pages (bnx2_reuse_rx_skb_pages
				 * reclaims the fragment just attached via
				 * the skb argument) and drop the packet.
				 */
				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bp->rx_pg_prod = pg_prod;
		bp->rx_pg_cons = pg_cons;
	}
	return 0;
}
2601
2602 static inline u16
2603 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2604 {
2605         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2606
2607         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2608                 cons++;
2609         return cons;
2610 }
2611
/* Service the rx ring: process completed packets up to "budget",
 * replenish or recycle buffers, and tell the chip the updated consumer
 * and producer indices.  Returns the number of packets delivered to
 * the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the l2_fhdr and the start of the packet need to
		 * be visible to the CPU for now; the full buffer is
		 * unmapped later in bnx2_rx_skb() if the packet is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Drop frames with receive errors; recycle the buffer. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}
		/* Nonzero hdr_len means part of the frame lives in the
		 * rx page ring (header/data split or jumbo frame).
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4-byte trailer from the reported length. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and give
			 * the original buffer straight back to the ring.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
				    (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they are VLAN tagged
		 * (ethertype 0x8100, which adds 4 bytes).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results only when rx checksum
		 * offload is enabled and the frame is TCP or UDP.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer indices and byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bp->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2756
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);
	/* Mask further interrupts; bnx2_poll() unmasks them when all
	 * work is done.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2780
/* One-shot MSI ISR.  Unlike bnx2_msi(), no masking write is issued
 * here — presumably the one-shot MSI hardware mode masks itself until
 * re-armed (hence the name); confirm against the chip documentation.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2798
/* INTx (legacy) interrupt handler, also used when the line is shared.
 * Returns IRQ_NONE when the interrupt was not raised by this device.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts; NAPI polling unmasks later. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2838
2839 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2840                                  STATUS_ATTN_BITS_TIMER_ABORT)
2841
2842 static inline int
2843 bnx2_has_work(struct bnx2_napi *bnapi)
2844 {
2845         struct bnx2 *bp = bnapi->bp;
2846         struct status_block *sblk = bp->status_blk;
2847
2848         if ((bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons) ||
2849             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2850                 return 1;
2851
2852         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2853             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2854                 return 1;
2855
2856         return 0;
2857 }
2858
/* One pass of NAPI work: handle link/attention events, completed
 * transmits, then received packets.  Only rx packets count against the
 * budget; returns the updated work_done total.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Attention bits and their acks disagree: a link/timer event
	 * is pending.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush */
	}

	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
		bnx2_tx_int(bp, bnapi);

	if (bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
2887
/* NAPI poll handler.  Loop doing work until the budget is exhausted or
 * nothing remains, then complete NAPI and re-enable interrupts by
 * acking the last seen status index.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI: a single ack write re-enables interrupts. */
			if (likely(bp->flags & USING_MSI_FLAG)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack first with interrupts still masked,
			 * then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
2929
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the MAC rx mode (promiscuous, VLAN tag stripping) and the
 * multicast hash filter / sort rules from dev->flags and the multicast
 * list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits managed here cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in the frame only when no vlan group is
	 * registered and ASF is inactive.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address to one bit of the 8 x 32-bit
			 * filter: low CRC byte picks register and bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort rules. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3004
/* Load an RV2P processor firmware image.  The image is a sequence of
 * 64-bit instructions; each loop iteration writes one instruction (two
 * 32-bit words) and commits it to instruction memory at index i/8.
 * The processor is left in reset; un-stall happens later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* NOTE(review): cpu_to_le32 before REG_WR looks like it
		 * could double-swap on big-endian if REG_WR already
		 * byte-swaps — confirm how the inflated image is stored.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3037
/* Halt an on-chip CPU, load all of its firmware sections into scratch
 * memory (text is decompressed from fw->gz_text into the fw->text
 * buffer supplied by the caller), set the program counter, and restart
 * it.  Returns 0 on success or a negative zlib error.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
3119
/* Decompress and load firmware into all on-chip processors: the two
 * RV2P engines plus the RXP, TXP, TPAT, COM, and CP CPUs.  A single
 * scratch buffer ("text") is reused for each decompressed image in
 * turn; 5709 chips get the "xi"/"_09" images, others the "_06" ones.
 * Returns 0 or a negative error from decompression/loading.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* Point the fw struct at the scratch buffer; load_cpu_fw()
	 * inflates gz_text into it.
	 */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);

init_cpu_err:
	vfree(text);
	return rc;
}
3280
/* Transition the device between PCI power states.
 *
 * @bp:    driver private state
 * @state: PCI_D0 (full power) or PCI_D3hot (low power, optionally WOL-armed)
 *
 * For D0, clears the PMCSR power-state field and re-programs the EMAC/RPM
 * blocks for normal (non-ACPI) operation.  For D3hot, optionally configures
 * the MAC for wake-on-LAN (forced 10/100 autoneg on copper, magic/ACPI
 * packet reception, accept-all-multicast), notifies the firmware, then
 * writes the new power state to PMCSR.  Returns 0 or -EINVAL for an
 * unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Force state to D0 and clear any pending PME status
		 * (PME_STATUS is write-1-to-clear). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear latched wake indications and leave magic-packet
		 * mode disabled for normal operation. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save the user's link settings; WOL on copper
			 * temporarily forces 10/100 autoneg below. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			/* Restore the saved settings for the next D0 entry. */
			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort-user0: clear, load, then set the
			 * enable bit in a separate write. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (unless WOL is
		 * entirely unsupported on this board). */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): on 5706 A0/A1 the power state field is only
		 * set to D3hot (3) when WOL is armed, i.e. the chip is left
		 * in D0 otherwise — presumably a chip erratum workaround;
		 * confirm against the errata list before changing. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3417
3418 static int
3419 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3420 {
3421         u32 val;
3422         int j;
3423
3424         /* Request access to the flash interface. */
3425         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3426         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3427                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3428                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3429                         break;
3430
3431                 udelay(5);
3432         }
3433
3434         if (j >= NVRAM_TIMEOUT_COUNT)
3435                 return -EBUSY;
3436
3437         return 0;
3438 }
3439
3440 static int
3441 bnx2_release_nvram_lock(struct bnx2 *bp)
3442 {
3443         int j;
3444         u32 val;
3445
3446         /* Relinquish nvram interface. */
3447         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3448
3449         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3450                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3451                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3452                         break;
3453
3454                 udelay(5);
3455         }
3456
3457         if (j >= NVRAM_TIMEOUT_COUNT)
3458                 return -EBUSY;
3459
3460         return 0;
3461 }
3462
3463
3464 static int
3465 bnx2_enable_nvram_write(struct bnx2 *bp)
3466 {
3467         u32 val;
3468
3469         val = REG_RD(bp, BNX2_MISC_CFG);
3470         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3471
3472         if (bp->flash_info->flags & BNX2_NV_WREN) {
3473                 int j;
3474
3475                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3476                 REG_WR(bp, BNX2_NVM_COMMAND,
3477                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3478
3479                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3480                         udelay(5);
3481
3482                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3483                         if (val & BNX2_NVM_COMMAND_DONE)
3484                                 break;
3485                 }
3486
3487                 if (j >= NVRAM_TIMEOUT_COUNT)
3488                         return -EBUSY;
3489         }
3490         return 0;
3491 }
3492
3493 static void
3494 bnx2_disable_nvram_write(struct bnx2 *bp)
3495 {
3496         u32 val;
3497
3498         val = REG_RD(bp, BNX2_MISC_CFG);
3499         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3500 }
3501
3502
3503 static void
3504 bnx2_enable_nvram_access(struct bnx2 *bp)
3505 {
3506         u32 val;
3507
3508         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3509         /* Enable both bits, even on read. */
3510         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3511                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3512 }
3513
3514 static void
3515 bnx2_disable_nvram_access(struct bnx2 *bp)
3516 {
3517         u32 val;
3518
3519         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3520         /* Disable both bits, even after read. */
3521         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3522                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3523                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3524 }
3525
3526 static int
3527 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3528 {
3529         u32 cmd;
3530         int j;
3531
3532         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3533                 /* Buffered flash, no erase needed */
3534                 return 0;
3535
3536         /* Build an erase command */
3537         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3538               BNX2_NVM_COMMAND_DOIT;
3539
3540         /* Need to clear DONE bit separately. */
3541         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3542
3543         /* Address of the NVRAM to read from. */
3544         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3545
3546         /* Issue an erase command. */
3547         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3548
3549         /* Wait for completion. */
3550         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3551                 u32 val;
3552
3553                 udelay(5);
3554
3555                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3556                 if (val & BNX2_NVM_COMMAND_DONE)
3557                         break;
3558         }
3559
3560         if (j >= NVRAM_TIMEOUT_COUNT)
3561                 return -EBUSY;
3562
3563         return 0;
3564 }
3565
3566 static int
3567 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3568 {
3569         u32 cmd;
3570         int j;
3571
3572         /* Build the command word. */
3573         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3574
3575         /* Calculate an offset of a buffered flash, not needed for 5709. */
3576         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3577                 offset = ((offset / bp->flash_info->page_size) <<
3578                            bp->flash_info->page_bits) +
3579                           (offset % bp->flash_info->page_size);
3580         }
3581
3582         /* Need to clear DONE bit separately. */
3583         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3584
3585         /* Address of the NVRAM to read from. */
3586         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3587
3588         /* Issue a read command. */
3589         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3590
3591         /* Wait for completion. */
3592         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3593                 u32 val;
3594
3595                 udelay(5);
3596
3597                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3598                 if (val & BNX2_NVM_COMMAND_DONE) {
3599                         val = REG_RD(bp, BNX2_NVM_READ);
3600
3601                         val = be32_to_cpu(val);
3602                         memcpy(ret_val, &val, 4);
3603                         break;
3604                 }
3605         }
3606         if (j >= NVRAM_TIMEOUT_COUNT)
3607                 return -EBUSY;
3608
3609         return 0;
3610 }
3611
3612
3613 static int
3614 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3615 {
3616         u32 cmd, val32;
3617         int j;
3618
3619         /* Build the command word. */
3620         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3621
3622         /* Calculate an offset of a buffered flash, not needed for 5709. */
3623         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3624                 offset = ((offset / bp->flash_info->page_size) <<
3625                           bp->flash_info->page_bits) +
3626                          (offset % bp->flash_info->page_size);
3627         }
3628
3629         /* Need to clear DONE bit separately. */
3630         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3631
3632         memcpy(&val32, val, 4);
3633         val32 = cpu_to_be32(val32);
3634
3635         /* Write the data. */
3636         REG_WR(bp, BNX2_NVM_WRITE, val32);
3637
3638         /* Address of the NVRAM to write to. */
3639         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3640
3641         /* Issue the write command. */
3642         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3643
3644         /* Wait for completion. */
3645         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3646                 udelay(5);
3647
3648                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3649                         break;
3650         }
3651         if (j >= NVRAM_TIMEOUT_COUNT)
3652                 return -EBUSY;
3653
3654         return 0;
3655 }
3656
/* Identify the attached flash/EEPROM device, record its descriptor in
 * bp->flash_info, and determine the usable flash size.  On chips other
 * than the 5709 the part is identified from the NVM strapping pins,
 * and the NVM interface is reconfigured for it if the firmware has not
 * already done so.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* The 5709 has a single known flash layout; no strap decoding. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 of NVM_CFG1 indicates the interface was already
	 * reconfigured (by firmware or a prior pass). */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither table scan matched: unknown part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared firmware config; fall back
	 * to the table entry's total size if it is absent. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3739
3740 static int
3741 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3742                 int buf_size)
3743 {
3744         int rc = 0;
3745         u32 cmd_flags, offset32, len32, extra;
3746
3747         if (buf_size == 0)
3748                 return 0;
3749
3750         /* Request access to the flash interface. */
3751         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3752                 return rc;
3753
3754         /* Enable access to flash interface */
3755         bnx2_enable_nvram_access(bp);
3756
3757         len32 = buf_size;
3758         offset32 = offset;
3759         extra = 0;
3760
3761         cmd_flags = 0;
3762
3763         if (offset32 & 3) {
3764                 u8 buf[4];
3765                 u32 pre_len;
3766
3767                 offset32 &= ~3;
3768                 pre_len = 4 - (offset & 3);
3769
3770                 if (pre_len >= len32) {
3771                         pre_len = len32;
3772                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3773                                     BNX2_NVM_COMMAND_LAST;
3774                 }
3775                 else {
3776                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3777                 }
3778
3779                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3780
3781                 if (rc)
3782                         return rc;
3783
3784                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3785
3786                 offset32 += 4;
3787                 ret_buf += pre_len;
3788                 len32 -= pre_len;
3789         }
3790         if (len32 & 3) {
3791                 extra = 4 - (len32 & 3);
3792                 len32 = (len32 + 4) & ~3;
3793         }
3794
3795         if (len32 == 4) {
3796                 u8 buf[4];
3797
3798                 if (cmd_flags)
3799                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3800                 else
3801                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3802                                     BNX2_NVM_COMMAND_LAST;
3803
3804                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3805
3806                 memcpy(ret_buf, buf, 4 - extra);
3807         }
3808         else if (len32 > 0) {
3809                 u8 buf[4];
3810
3811                 /* Read the first word. */
3812                 if (cmd_flags)
3813                         cmd_flags = 0;
3814                 else
3815                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3816
3817                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3818
3819                 /* Advance to the next dword. */
3820                 offset32 += 4;
3821                 ret_buf += 4;
3822                 len32 -= 4;
3823
3824                 while (len32 > 4 && rc == 0) {
3825                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3826
3827                         /* Advance to the next dword. */
3828                         offset32 += 4;
3829                         ret_buf += 4;
3830                         len32 -= 4;
3831                 }
3832
3833                 if (rc)
3834                         return rc;
3835
3836                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3837                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3838
3839                 memcpy(ret_buf, buf, 4 - extra);
3840         }
3841
3842         /* Disable access to flash interface */
3843         bnx2_disable_nvram_access(bp);
3844
3845         bnx2_release_nvram_lock(bp);
3846
3847         return rc;
3848 }
3849
3850 static int
3851 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3852                 int buf_size)
3853 {
3854         u32 written, offset32, len32;
3855         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3856         int rc = 0;
3857         int align_start, align_end;
3858
3859         buf = data_buf;
3860         offset32 = offset;
3861         len32 = buf_size;
3862         align_start = align_end = 0;
3863
3864         if ((align_start = (offset32 & 3))) {
3865                 offset32 &= ~3;
3866                 len32 += align_start;
3867                 if (len32 < 4)
3868                         len32 = 4;
3869                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3870                         return rc;
3871         }
3872
3873         if (len32 & 3) {
3874                 align_end = 4 - (len32 & 3);
3875                 len32 += align_end;
3876                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3877                         return rc;
3878         }
3879
3880         if (align_start || align_end) {
3881                 align_buf = kmalloc(len32, GFP_KERNEL);
3882                 if (align_buf == NULL)
3883                         return -ENOMEM;
3884                 if (align_start) {
3885                         memcpy(align_buf, start, 4);
3886                 }
3887                 if (align_end) {
3888                         memcpy(align_buf + len32 - 4, end, 4);
3889                 }
3890                 memcpy(align_buf + align_start, data_buf, buf_size);
3891                 buf = align_buf;
3892         }
3893
3894         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3895                 flash_buffer = kmalloc(264, GFP_KERNEL);
3896                 if (flash_buffer == NULL) {
3897                         rc = -ENOMEM;
3898                         goto nvram_write_end;
3899                 }
3900         }
3901
3902         written = 0;
3903         while ((written < len32) && (rc == 0)) {
3904                 u32 page_start, page_end, data_start, data_end;
3905                 u32 addr, cmd_flags;
3906                 int i;
3907
3908                 /* Find the page_start addr */
3909                 page_start = offset32 + written;
3910                 page_start -= (page_start % bp->flash_info->page_size);
3911                 /* Find the page_end addr */
3912                 page_end = page_start + bp->flash_info->page_size;
3913                 /* Find the data_start addr */
3914                 data_start = (written == 0) ? offset32 : page_start;
3915                 /* Find the data_end addr */
3916                 data_end = (page_end > offset32 + len32) ?
3917                         (offset32 + len32) : page_end;
3918
3919                 /* Request access to the flash interface. */
3920                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3921                         goto nvram_write_end;
3922
3923                 /* Enable access to flash interface */
3924                 bnx2_enable_nvram_access(bp);
3925
3926                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3927                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3928                         int j;
3929
3930                         /* Read the whole page into the buffer
3931                          * (non-buffer flash only) */
3932                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3933                                 if (j == (bp->flash_info->page_size - 4)) {
3934                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3935                                 }
3936                                 rc = bnx2_nvram_read_dword(bp,
3937                                         page_start + j,
3938                                         &flash_buffer[j],
3939                                         cmd_flags);
3940
3941                                 if (rc)
3942                                         goto nvram_write_end;
3943
3944                                 cmd_flags = 0;
3945                         }
3946                 }
3947
3948                 /* Enable writes to flash interface (unlock write-protect) */
3949                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3950                         goto nvram_write_end;
3951
3952                 /* Loop to write back the buffer data from page_start to
3953                  * data_start */
3954                 i = 0;
3955                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3956                         /* Erase the page */
3957                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3958                                 goto nvram_write_end;
3959
3960                         /* Re-enable the write again for the actual write */
3961                         bnx2_enable_nvram_write(bp);
3962
3963                         for (addr = page_start; addr < data_start;
3964                                 addr += 4, i += 4) {
3965
3966                                 rc = bnx2_nvram_write_dword(bp, addr,
3967                                         &flash_buffer[i], cmd_flags);
3968
3969                                 if (rc != 0)
3970                                         goto nvram_write_end;
3971
3972                                 cmd_flags = 0;
3973                         }
3974                 }
3975
3976                 /* Loop to write the new data from data_start to data_end */
3977                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3978                         if ((addr == page_end - 4) ||
3979                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3980                                  (addr == data_end - 4))) {
3981
3982                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3983                         }
3984                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3985                                 cmd_flags);
3986
3987                         if (rc != 0)
3988                                 goto nvram_write_end;
3989
3990                         cmd_flags = 0;
3991                         buf += 4;
3992                 }
3993
3994                 /* Loop to write back the buffer data from data_end
3995                  * to page_end */
3996                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3997                         for (addr = data_end; addr < page_end;
3998                                 addr += 4, i += 4) {
3999
4000                                 if (addr == page_end-4) {
4001                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4002                                 }
4003                                 rc = bnx2_nvram_write_dword(bp, addr,
4004                                         &flash_buffer[i], cmd_flags);
4005
4006                                 if (rc != 0)
4007                                         goto nvram_write_end;
4008
4009                                 cmd_flags = 0;
4010                         }
4011                 }
4012
4013                 /* Disable writes to flash interface (lock write-protect) */
4014                 bnx2_disable_nvram_write(bp);
4015
4016                 /* Disable access to flash interface */
4017                 bnx2_disable_nvram_access(bp);
4018                 bnx2_release_nvram_lock(bp);
4019
4020                 /* Increment written */
4021                 written += data_end - data_start;
4022         }
4023
4024 nvram_write_end:
4025         kfree(flash_buffer);
4026         kfree(align_buf);
4027         return rc;
4028 }
4029
4030 static void
4031 bnx2_init_remote_phy(struct bnx2 *bp)
4032 {
4033         u32 val;
4034
4035         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4036         if (!(bp->phy_flags & PHY_SERDES_FLAG))
4037                 return;
4038
4039         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4040         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4041                 return;
4042
4043         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4044                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4045
4046                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4047                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4048                         bp->phy_port = PORT_FIBRE;
4049                 else
4050                         bp->phy_port = PORT_TP;
4051
4052                 if (netif_running(bp->dev)) {
4053                         u32 sig;
4054
4055                         if (val & BNX2_LINK_STATUS_LINK_UP) {
4056                                 bp->link_up = 1;
4057                                 netif_carrier_on(bp->dev);
4058                         } else {
4059                                 bp->link_up = 0;
4060                                 netif_carrier_off(bp->dev);
4061                         }
4062                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4063                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4064                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4065                                    sig);
4066                 }
4067         }
4068 }
4069
4070 static int
4071 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4072 {
4073         u32 val;
4074         int i, rc = 0;
4075         u8 old_port;
4076
4077         /* Wait for the current PCI transaction to complete before
4078          * issuing a reset. */
4079         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4080                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4081                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4082                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4083                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4084         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4085         udelay(5);
4086
4087         /* Wait for the firmware to tell us it is ok to issue a reset. */
4088         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4089
4090         /* Deposit a driver reset signature so the firmware knows that
4091          * this is a soft reset. */
4092         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4093                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
4094
4095         /* Do a dummy read to force the chip to complete all current transaction
4096          * before we issue a reset. */
4097         val = REG_RD(bp, BNX2_MISC_ID);
4098
4099         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4100                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4101                 REG_RD(bp, BNX2_MISC_COMMAND);
4102                 udelay(5);
4103
4104                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4105                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4106
4107                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4108
4109         } else {
4110                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4111                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4112                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4113
4114                 /* Chip reset. */
4115                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4116
4117                 /* Reading back any register after chip reset will hang the
4118                  * bus on 5706 A0 and A1.  The msleep below provides plenty
4119                  * of margin for write posting.
4120                  */
4121                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4122                     (CHIP_ID(bp) == CHIP_ID_5706_A1))
4123                         msleep(20);
4124
4125                 /* Reset takes approximate 30 usec */
4126                 for (i = 0; i < 10; i++) {
4127                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4128                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4129                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4130                                 break;
4131                         udelay(10);
4132                 }
4133
4134                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4135                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4136                         printk(KERN_ERR PFX "Chip reset did not complete\n");
4137                         return -EBUSY;
4138                 }
4139         }
4140
4141         /* Make sure byte swapping is properly configured. */
4142         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4143         if (val != 0x01020304) {
4144                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4145                 return -ENODEV;
4146         }
4147
4148         /* Wait for the firmware to finish its initialization. */
4149         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4150         if (rc)
4151                 return rc;
4152
4153         spin_lock_bh(&bp->phy_lock);
4154         old_port = bp->phy_port;
4155         bnx2_init_remote_phy(bp);
4156         if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4157                 bnx2_set_default_remote_link(bp);
4158         spin_unlock_bh(&bp->phy_lock);
4159
4160         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4161                 /* Adjust the voltage regular to two steps lower.  The default
4162                  * of this register is 0x0000000e. */
4163                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4164
4165                 /* Remove bad rbuf memory from the free pool. */
4166                 rc = bnx2_alloc_bad_rbuf(bp);
4167         }
4168
4169         return rc;
4170 }
4171
/* One-time chip initialization after reset: program the DMA engine,
 * context memory, on-chip CPUs, NVRAM, MAC address, MTU and host
 * coalescing parameters, then hand control back to the bootcode via
 * bnx2_fw_sync().  Returns 0 on success or a negative errno.
 * The register write ordering below is significant. */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swap configuration plus the number of read and
	 * write DMA channels (bits 12..15 and 16..19). */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit only for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the relaxed-ordering enable bit in PCI config
	 * space. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load and start the on-chip RV2P/processor firmware. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* Workaround bit for early 5709 steppings. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff algorithm from the MAC address bytes. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->bnx2_napi.last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks for host
	 * coalescing. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters: the "during interrupt" value
	 * goes in the high 16 bits of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer modes. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the bootcode initialization is done; rc is returned to the
	 * caller after the final enables below. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	/* Read back to flush the posted write before the delay. */
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4349
4350 static void
4351 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4352 {
4353         u32 val, offset0, offset1, offset2, offset3;
4354
4355         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4356                 offset0 = BNX2_L2CTX_TYPE_XI;
4357                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4358                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4359                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4360         } else {
4361                 offset0 = BNX2_L2CTX_TYPE;
4362                 offset1 = BNX2_L2CTX_CMD_TYPE;
4363                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4364                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4365         }
4366         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4367         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4368
4369         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4370         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4371
4372         val = (u64) bp->tx_desc_mapping >> 32;
4373         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4374
4375         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4376         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4377 }
4378
4379 static void
4380 bnx2_init_tx_ring(struct bnx2 *bp)
4381 {
4382         struct tx_bd *txbd;
4383         u32 cid;
4384         struct bnx2_napi *bnapi = &bp->bnx2_napi;
4385
4386         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4387
4388         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4389
4390         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4391         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4392
4393         bp->tx_prod = 0;
4394         bnapi->tx_cons = 0;
4395         bnapi->hw_tx_cons = 0;
4396         bp->tx_prod_bseq = 0;
4397
4398         cid = TX_CID;
4399         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4400         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4401
4402         bnx2_init_tx_context(bp, cid);
4403 }
4404
4405 static void
4406 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4407                      int num_rings)
4408 {
4409         int i;
4410         struct rx_bd *rxbd;
4411
4412         for (i = 0; i < num_rings; i++) {
4413                 int j;
4414
4415                 rxbd = &rx_ring[i][0];
4416                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4417                         rxbd->rx_bd_len = buf_size;
4418                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4419                 }
4420                 if (i == (num_rings - 1))
4421                         j = 0;
4422                 else
4423                         j = i + 1;
4424                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4425                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4426         }
4427 }
4428
/* Reset RX ring state, program the RX (and optional RX page) context
 * in the chip, pre-fill both rings with buffers, and publish the
 * initial producer indices through the mailbox registers. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

	/* All producer/consumer indices start at zero. */
	bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->rx_prod_bseq = 0;
	bp->rx_pg_prod = 0;
	bp->rx_pg_cons = 0;

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	/* Page ring is only used for jumbo MTUs (see
	 * bnx2_set_rx_ring_size()). */
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		/* Host address of the first page-ring page. */
		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	/* Host address of the first RX-ring page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = bp->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bp->rx_pg_prod = prod;

	/* Pre-fill the RX ring with skbs; stop early on failure. */
	ring_prod = prod = bp->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4500
4501 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4502 {
4503         u32 max, num_rings = 1;
4504
4505         while (ring_size > MAX_RX_DESC_CNT) {
4506                 ring_size -= MAX_RX_DESC_CNT;
4507                 num_rings++;
4508         }
4509         /* round to next power of 2 */
4510         max = max_size;
4511         while ((max & num_rings) == 0)
4512                 max >>= 1;
4513
4514         if (num_rings != max)
4515                 max <<= 1;
4516
4517         return max;
4518 }
4519
4520 static void
4521 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4522 {
4523         u32 rx_size, rx_space, jumbo_size;
4524
4525         /* 8 for CRC and VLAN */
4526         rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4527
4528         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4529                 sizeof(struct skb_shared_info);
4530
4531         bp->rx_copy_thresh = RX_COPY_THRESH;
4532         bp->rx_pg_ring_size = 0;
4533         bp->rx_max_pg_ring = 0;
4534         bp->rx_max_pg_ring_idx = 0;
4535         if (rx_space > PAGE_SIZE) {
4536                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4537
4538                 jumbo_size = size * pages;
4539                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4540                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4541
4542                 bp->rx_pg_ring_size = jumbo_size;
4543                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4544                                                         MAX_RX_PG_RINGS);
4545                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4546                 rx_size = RX_COPY_THRESH + bp->rx_offset;
4547                 bp->rx_copy_thresh = 0;
4548         }
4549
4550         bp->rx_buf_use_size = rx_size;
4551         /* hw alignment */
4552         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4553         bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4554         bp->rx_ring_size = size;
4555         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4556         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4557 }
4558
4559 static void
4560 bnx2_free_tx_skbs(struct bnx2 *bp)
4561 {
4562         int i;
4563
4564         if (bp->tx_buf_ring == NULL)
4565                 return;
4566
4567         for (i = 0; i < TX_DESC_CNT; ) {
4568                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4569                 struct sk_buff *skb = tx_buf->skb;
4570                 int j, last;
4571
4572                 if (skb == NULL) {
4573                         i++;
4574                         continue;
4575                 }
4576
4577                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4578                         skb_headlen(skb), PCI_DMA_TODEVICE);
4579
4580                 tx_buf->skb = NULL;
4581
4582                 last = skb_shinfo(skb)->nr_frags;
4583                 for (j = 0; j < last; j++) {
4584                         tx_buf = &bp->tx_buf_ring[i + j + 1];
4585                         pci_unmap_page(bp->pdev,
4586                                 pci_unmap_addr(tx_buf, mapping),
4587                                 skb_shinfo(skb)->frags[j].size,
4588                                 PCI_DMA_TODEVICE);
4589                 }
4590                 dev_kfree_skb(skb);
4591                 i += j + 1;
4592         }
4593
4594 }
4595
4596 static void
4597 bnx2_free_rx_skbs(struct bnx2 *bp)
4598 {
4599         int i;
4600
4601         if (bp->rx_buf_ring == NULL)
4602                 return;
4603
4604         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4605                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4606                 struct sk_buff *skb = rx_buf->skb;
4607
4608                 if (skb == NULL)
4609                         continue;
4610
4611                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4612                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4613
4614                 rx_buf->skb = NULL;
4615
4616                 dev_kfree_skb(skb);
4617         }
4618         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4619                 bnx2_free_rx_page(bp, i);
4620 }
4621
/* Release all sk_buffs queued on both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4628
4629 static int
4630 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4631 {
4632         int rc;
4633
4634         rc = bnx2_reset_chip(bp, reset_code);
4635         bnx2_free_skbs(bp);
4636         if (rc)
4637                 return rc;
4638
4639         if ((rc = bnx2_init_chip(bp)) != 0)
4640                 return rc;
4641
4642         bnx2_init_tx_ring(bp);
4643         bnx2_init_rx_ring(bp);
4644         return 0;
4645 }
4646
4647 static int
4648 bnx2_init_nic(struct bnx2 *bp)
4649 {
4650         int rc;
4651
4652         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4653                 return rc;
4654
4655         spin_lock_bh(&bp->phy_lock);
4656         bnx2_init_phy(bp);
4657         bnx2_set_link(bp);
4658         spin_unlock_bh(&bp->phy_lock);
4659         return 0;
4660 }
4661
/* Ethtool self-test: for each register in the table, verify that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Returns 0 on success or
 * -ENODEV on the first mismatch. */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: register offset; rw_mask: bits that must be writable;
	 * ro_mask: bits that must be read-only.  Entries flagged
	 * BNX2_FL_NOT_5709 are skipped on the 5709.  Terminated by
	 * offset 0xffff. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Writing 0: all rw bits must read back 0, ro bits must
		 * keep their saved values. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all ones: all rw bits must read back 1, ro
		 * bits must still keep their saved values. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4832
4833 static int
4834 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4835 {
4836         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4837                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4838         int i;
4839
4840         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4841                 u32 offset;
4842
4843                 for (offset = 0; offset < size; offset += 4) {
4844
4845                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4846
4847                         if (REG_RD_IND(bp, start + offset) !=
4848                                 test_pattern[i]) {
4849                                 return -ENODEV;
4850                         }
4851                 }
4852         }
4853         return 0;
4854 }
4855
4856 static int
4857 bnx2_test_memory(struct bnx2 *bp)
4858 {
4859         int ret = 0;
4860         int i;
4861         static struct mem_entry {
4862                 u32   offset;
4863                 u32   len;
4864         } mem_tbl_5706[] = {
4865                 { 0x60000,  0x4000 },
4866                 { 0xa0000,  0x3000 },
4867                 { 0xe0000,  0x4000 },
4868                 { 0x120000, 0x4000 },
4869                 { 0x1a0000, 0x4000 },
4870                 { 0x160000, 0x4000 },
4871                 { 0xffffffff, 0    },
4872         },
4873         mem_tbl_5709[] = {
4874                 { 0x60000,  0x4000 },
4875                 { 0xa0000,  0x3000 },
4876                 { 0xe0000,  0x4000 },
4877                 { 0x120000, 0x4000 },
4878                 { 0x1a0000, 0x4000 },
4879                 { 0xffffffff, 0    },
4880         };
4881         struct mem_entry *mem_tbl;
4882
4883         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4884                 mem_tbl = mem_tbl_5709;
4885         else
4886                 mem_tbl = mem_tbl_5706;
4887
4888         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4889                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4890                         mem_tbl[i].len)) != 0) {
4891                         return ret;
4892                 }
4893         }
4894
4895         return ret;
4896 }
4897
4898 #define BNX2_MAC_LOOPBACK       0
4899 #define BNX2_PHY_LOOPBACK       1
4900
/* Send one packet through the chip in MAC- or PHY-loopback mode and
 * verify that it comes back intact on the RX ring.  Returns 0 on
 * success, -EINVAL for an unknown mode, -ENOMEM if no skb could be
 * allocated, and -ENODEV if the packet was not transmitted or not
 * received correctly.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback is skipped (reported as pass) when the PHY
		 * is remotely managed.
		 */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our MAC address as destination, zeroed
	 * source/type bytes, then an incrementing byte pattern.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status-block coalesce (without an interrupt) so the
	 * RX consumer snapshot below is current.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post one TX descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so the TX/RX completions reach the status block. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX descriptor must have been consumed... */
	if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
		goto loopback_test_done;

	/* ...and exactly one packet must have arrived on the RX ring. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written L2 frame header precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Hardware length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5022
5023 #define BNX2_MAC_LOOPBACK_FAILED        1
5024 #define BNX2_PHY_LOOPBACK_FAILED        2
5025 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5026                                          BNX2_PHY_LOOPBACK_FAILED)
5027
5028 static int
5029 bnx2_test_loopback(struct bnx2 *bp)
5030 {
5031         int rc = 0;
5032
5033         if (!netif_running(bp->dev))
5034                 return BNX2_LOOPBACK_FAILED;
5035
5036         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5037         spin_lock_bh(&bp->phy_lock);
5038         bnx2_init_phy(bp);
5039         spin_unlock_bh(&bp->phy_lock);
5040         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5041                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5042         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5043                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5044         return rc;
5045 }
5046
5047 #define NVRAM_SIZE 0x200
5048 #define CRC32_RESIDUAL 0xdebb20e3
5049
5050 static int
5051 bnx2_test_nvram(struct bnx2 *bp)
5052 {
5053         u32 buf[NVRAM_SIZE / 4];
5054         u8 *data = (u8 *) buf;
5055         int rc = 0;
5056         u32 magic, csum;
5057
5058         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5059                 goto test_nvram_done;
5060
5061         magic = be32_to_cpu(buf[0]);
5062         if (magic != 0x669955aa) {
5063                 rc = -ENODEV;
5064                 goto test_nvram_done;
5065         }
5066
5067         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5068                 goto test_nvram_done;
5069
5070         csum = ether_crc_le(0x100, data);
5071         if (csum != CRC32_RESIDUAL) {
5072                 rc = -ENODEV;
5073                 goto test_nvram_done;
5074         }
5075
5076         csum = ether_crc_le(0x100, data + 0x100);
5077         if (csum != CRC32_RESIDUAL) {
5078                 rc = -ENODEV;
5079         }
5080
5081 test_nvram_done:
5082         return rc;
5083 }
5084
5085 static int
5086 bnx2_test_link(struct bnx2 *bp)
5087 {
5088         u32 bmsr;
5089
5090         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5091                 if (bp->link_up)
5092                         return 0;
5093                 return -ENODEV;
5094         }
5095         spin_lock_bh(&bp->phy_lock);
5096         bnx2_enable_bmsr1(bp);
5097         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5098         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5099         bnx2_disable_bmsr1(bp);
5100         spin_unlock_bh(&bp->phy_lock);
5101
5102         if (bmsr & BMSR_LSTATUS) {
5103                 return 0;
5104         }
5105         return -ENODEV;
5106 }
5107
5108 static int
5109 bnx2_test_intr(struct bnx2 *bp)
5110 {
5111         int i;
5112         u16 status_idx;
5113
5114         if (!netif_running(bp->dev))
5115                 return -ENODEV;
5116
5117         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5118
5119         /* This register is not touched during run-time. */
5120         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5121         REG_RD(bp, BNX2_HC_COMMAND);
5122
5123         for (i = 0; i < 10; i++) {
5124                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5125                         status_idx) {
5126
5127                         break;
5128                 }
5129
5130                 msleep_interruptible(10);
5131         }
5132         if (i < 10)
5133                 return 0;
5134
5135         return -ENODEV;
5136 }
5137
/* Per-timer-tick SerDes link maintenance for the 5706.  While
 * autoneg has not brought the link up, probe the PHY: if a signal is
 * detected but no CONFIG word is being received, the partner is not
 * autonegotiating, so force 1Gb full duplex (parallel detect).  Once
 * a link forced this way later sees a CONFIG word, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* 0x15/0x17/0x1c are vendor-specific PHY shadow
			 * registers; 0x15 is read twice to get the
			 * current (unlatched) value.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1Gb full duplex.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * sends a CONFIG word, return to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5192
/* Per-timer-tick SerDes maintenance for the 5708: while the link is
 * down with autoneg requested, alternate between forced 2.5Gb and
 * autonegotiation so a connection can be made to partners that do
 * not autonegotiate.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to do when the PHY is remotely managed. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not link: try forced 2.5Gb for one
			 * (shorter) interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either: back to
			 * autoneg, skipping the next two ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5225
/* Periodic driver timer: sends the firmware heartbeat, refreshes the
 * firmware RX drop counter, applies a 5708 statistics workaround,
 * and runs the SerDes state machines.  Always rearms itself while
 * the device is running.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep the timer running) while interrupts
	 * are held off via intr_sem.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	/* Firmware-counted RX drops are folded into the stats block. */
	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5256
5257 static int
5258 bnx2_request_irq(struct bnx2 *bp)
5259 {
5260         struct net_device *dev = bp->dev;
5261         unsigned long flags;
5262         struct bnx2_irq *irq = &bp->irq_tbl[0];
5263         int rc;
5264
5265         if (bp->flags & USING_MSI_FLAG)
5266                 flags = 0;
5267         else
5268                 flags = IRQF_SHARED;
5269         rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
5270         return rc;
5271 }
5272
5273 static void
5274 bnx2_free_irq(struct bnx2 *bp)
5275 {
5276         struct net_device *dev = bp->dev;
5277
5278         free_irq(bp->irq_tbl[0].vector, dev);
5279         if (bp->flags & USING_MSI_FLAG) {
5280                 pci_disable_msi(bp->pdev);
5281                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5282         }
5283 }
5284
5285 static void
5286 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5287 {
5288         bp->irq_tbl[0].handler = bnx2_interrupt;
5289         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5290
5291         if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
5292                 if (pci_enable_msi(bp->pdev) == 0) {
5293                         bp->flags |= USING_MSI_FLAG;
5294                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5295                                 bp->flags |= ONE_SHOT_MSI_FLAG;
5296                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5297                         } else
5298                                 bp->irq_tbl[0].handler = bnx2_msi;
5299                 }
5300         }
5301
5302         bp->irq_tbl[0].vector = bp->pdev->irq;
5303 }
5304
/* net_device open hook; called with rtnl_lock held.  Allocates all
 * ring/status memory, selects MSI or INTx, brings up the NIC, and
 * verifies that MSI actually delivers interrupts (falling back to
 * INTx if not) before starting the TX queue.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi = 1: force INTx on the retry. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5386
/* Work-queue handler that fully reinitializes the NIC (scheduled
 * from bnx2_tx_timeout()).  in_reset_task lets bnx2_close() wait for
 * us instead of flushing the workqueue, which could deadlock on
 * rtnl_lock (see the comment in bnx2_close()).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Hold off interrupt handling until bnx2_netif_start()
	 * re-enables interrupts.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5404
/* net_device tx_timeout hook: defer the chip reset to process
 * context via the reset work item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5413
5414 #ifdef BCM_VLAN
5415 /* Called with rtnl_lock */
/* Install the new VLAN group and reprogram the RX mode to match;
 * traffic is stopped around the update.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5428 #endif
5429
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: maps the skb (linear head plus page frags)
 * onto a chain of TX BDs, encoding checksum/VLAN/LSO information in
 * the BD flags, then rings the TX doorbell.  Stops the queue when
 * the ring is nearly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	/* The queue should have been stopped before the ring got this
	 * full; reaching here indicates a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* VLAN tag goes in the upper 16 bits of the flags field. */
	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TCPv6 LSO: encode the TCP header offset
			 * (relative to the position after a standard
			 * IPv6 header) into the flag and mss fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TCPv4 LSO: the headers must be writable so the
			 * pseudo-header checksum can be prepared.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Pass IP and TCP option lengths to the chip. */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First BD: linear part of the skb.  Remember the skb and the
	 * mapping so bnx2_tx_int() can unmap and free on completion.
	 */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when fewer BDs remain than a maximally
	 * fragmented skb could need; re-wake immediately if
	 * completions already freed enough space (closes a race with
	 * bnx2_tx_int()).
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5570
/* net_device stop hook; called with rtnl_lock held.  Quiesces the
 * device, tells the firmware how we are going down (WOL or not),
 * resets the chip and frees all resources.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Select the firmware unload/suspend message by WOL capability. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5603
/* Fold a 64-bit hardware counter, stored as _hi/_lo u32 pairs, into
 * an unsigned long.  The expansion is fully parenthesized so the
 * macro can be used safely inside larger expressions (the previous
 * form ended in an unparenthesized `+`, which would mis-associate
 * under e.g. multiplication).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	  (unsigned long) (ctr##_lo)))

/* On 32-bit hosts only the low 32 bits of each counter fit. */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5616
/* net_device get_stats hook: translate the chip's hardware
 * statistics block into struct net_device_stats.  64-bit counters
 * are kept by the chip as hi/lo u32 pairs and folded with
 * GET_NET_STATS.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* The stats block may not be allocated yet; return the last
	 * (or zeroed) values in that case.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 or 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Drops counted by both the RX MAC and the firmware. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5692
5693 /* All ethtool functions called with rtnl_lock */
5694
/* ethtool get_settings hook; rtnl_lock held by the ethtool core.
 * Reports supported modes based on the port type, and the current
 * link parameters (speed/duplex only while the carrier is up).
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remotely managed PHY can switch between serdes and copper. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* Snapshot link parameters under the PHY lock. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
5753
/* ethtool set_settings hook; rtnl_lock held by the ethtool core.
 * Validates the requested port/autoneg/speed/duplex combination
 * against the hardware capabilities, then reprograms the PHY.
 * Returns -EINVAL for unsupported combinations; driver state is only
 * committed once validation has passed.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* The port type can only change when the PHY is remotely managed. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5Gb requires a capable fibre PHY. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the port
			 * type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only supports 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		/* Forced gigabit+ is not supported on copper. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5836
5837 static void
5838 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5839 {
5840         struct bnx2 *bp = netdev_priv(dev);
5841
5842         strcpy(info->driver, DRV_MODULE_NAME);
5843         strcpy(info->version, DRV_MODULE_VERSION);
5844         strcpy(info->bus_info, pci_name(bp->pdev));
5845         strcpy(info->fw_version, bp->fw_version);
5846 }
5847
5848 #define BNX2_REGDUMP_LEN                (32 * 1024)
5849
5850 static int
5851 bnx2_get_regs_len(struct net_device *dev)
5852 {
5853         return BNX2_REGDUMP_LEN;
5854 }
5855
/* Fill *_p with a BNX2_REGDUMP_LEN-byte register dump for ethtool -d.
 * Only the readable register windows listed in reg_boundaries[] are
 * actually read from the chip; the gaps between windows are left zero.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of (start, end-exclusive) byte offsets of readable register
	 * windows, in ascending order.  The final 0x8000 sentinel equals
	 * BNX2_REGDUMP_LEN, terminating the walk after the last window.
	 */
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	/* Pre-zero the whole buffer so skipped (unreadable) ranges
	 * appear as zeros in the dump. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Don't touch chip registers unless the device is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* NOTE(review): u32-pointer arithmetic, so this advances by
	 * offset * 4 bytes rather than offset bytes.  Harmless today only
	 * because reg_boundaries[0] is 0. */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of the current window reached: jump both the chip
		 * offset and the output pointer to the next window start. */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
5905
5906 static void
5907 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5908 {
5909         struct bnx2 *bp = netdev_priv(dev);
5910
5911         if (bp->flags & NO_WOL_FLAG) {
5912                 wol->supported = 0;
5913                 wol->wolopts = 0;
5914         }
5915         else {
5916                 wol->supported = WAKE_MAGIC;
5917                 if (bp->wol)
5918                         wol->wolopts = WAKE_MAGIC;
5919                 else
5920                         wol->wolopts = 0;
5921         }
5922         memset(&wol->sopass, 0, sizeof(wol->sopass));
5923 }
5924
5925 static int
5926 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5927 {
5928         struct bnx2 *bp = netdev_priv(dev);
5929
5930         if (wol->wolopts & ~WAKE_MAGIC)
5931                 return -EINVAL;
5932
5933         if (wol->wolopts & WAKE_MAGIC) {
5934                 if (bp->flags & NO_WOL_FLAG)
5935                         return -EINVAL;
5936
5937                 bp->wol = 1;
5938         }
5939         else {
5940                 bp->wol = 0;
5941         }
5942         return 0;
5943 }
5944
/* ethtool -r: restart autonegotiation on the link.
 * Returns -EINVAL if autoneg is not currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* PHY is controlled by management firmware: delegate the restart
	 * instead of poking MII registers directly. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Put the SerDes PHY in loopback so the peer sees the link
		 * drop; loopback is cleared again below before restarting
		 * autoneg. */
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep — phy_lock is a BH
		 * spinlock and must not be held while sleeping. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the driver timer to poll for the SerDes autoneg
		 * outcome. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback (if set above) and kick off a fresh autoneg. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5987
5988 static int
5989 bnx2_get_eeprom_len(struct net_device *dev)
5990 {
5991         struct bnx2 *bp = netdev_priv(dev);
5992
5993         if (bp->flash_info == NULL)
5994                 return 0;
5995
5996         return (int) bp->flash_size;
5997 }
5998
5999 static int
6000 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6001                 u8 *eebuf)
6002 {
6003         struct bnx2 *bp = netdev_priv(dev);
6004         int rc;
6005
6006         /* parameters already validated in ethtool_get_eeprom */
6007
6008         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6009
6010         return rc;
6011 }
6012
6013 static int
6014 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6015                 u8 *eebuf)
6016 {
6017         struct bnx2 *bp = netdev_priv(dev);
6018         int rc;
6019
6020         /* parameters already validated in ethtool_set_eeprom */
6021
6022         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6023
6024         return rc;
6025 }
6026
6027 static int
6028 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6029 {
6030         struct bnx2 *bp = netdev_priv(dev);
6031
6032         memset(coal, 0, sizeof(struct ethtool_coalesce));
6033
6034         coal->rx_coalesce_usecs = bp->rx_ticks;
6035         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6036         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6037         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6038
6039         coal->tx_coalesce_usecs = bp->tx_ticks;
6040         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6041         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6042         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6043
6044         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6045
6046         return 0;
6047 }
6048
6049 static int
6050 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6051 {
6052         struct bnx2 *bp = netdev_priv(dev);
6053
6054         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6055         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6056
6057         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6058         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6059
6060         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6061         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6062
6063         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6064         if (bp->rx_quick_cons_trip_int > 0xff)
6065                 bp->rx_quick_cons_trip_int = 0xff;
6066
6067         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6068         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6069
6070