ecfaad102f70c8d984c42471d17d6586acc6c662
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Size of the buffer used to hold decompressed firmware images
 * (see zlib include above) -- presumably sized for the largest
 * firmware section; TODO confirm against bnx2_fw*.h. */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.7.0"
#define DRV_MODULE_RELDATE      "December 11, 2007"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board identifiers; used as the driver_data index into board_info[]
 * and referenced by bnx2_pci_tbl below, so the order must match. */
typedef enum {
        BCM5706 = 0,
        NC370T,         /* HP OEM variants of the 5706 */
        NC370I,
        BCM5706S,       /* S suffix = SerDes (fiber) versions */
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
92 /* indexed by board_t, above */
/* Marketing names for each board, indexed by board_t, above.
 * Keep entries in the same order as the board_t enum. */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI IDs this driver binds to.  HP subsystem-ID entries must precede
 * the PCI_ANY_ID wildcard entries for the same device so the more
 * specific board name is matched first.  The final field (driver_data)
 * is a board_t index into board_info[]. */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* Table of supported NVRAM (flash/EEPROM) parts.  The first field of
 * each entry is a strap value read from the hardware to identify the
 * installed part; the following four fields are controller
 * configuration words.  NOTE(review): the exact field order follows
 * struct flash_spec in bnx2.h -- these magic values are hardware
 * strapping data and must not be altered. */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 has a single fixed NVRAM configuration, so no strap-based
 * lookup through flash_table is needed for that chip. */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bnapi->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Read a device register indirectly through the PCI config window.
 * The window address/data register pair is a shared resource, so the
 * address write and data read are serialized under indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
258
/* Write @val to a device register indirectly through the PCI config
 * window; counterpart of bnx2_reg_rd_ind() and serialized by the same
 * indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register @reg over the MDIO interface into *@val.
 *
 * If the EMAC is auto-polling the PHY, auto-poll is temporarily
 * disabled around the access (it owns the MDIO bus otherwise) and
 * re-enabled afterwards.  The read command is issued with START_BUSY
 * set and the register is polled up to 50 times at 10us intervals for
 * the bit to clear.
 *
 * Returns 0 on success (result in *@val) or -EBUSY on timeout, in
 * which case *@val is set to 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                /* read back to post the write before the settle delay */
                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* re-read to fetch the completed data word */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
            }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
349
/* Write @val to PHY register @reg over the MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * access if active, the write command is issued with START_BUSY set,
 * and completion is polled up to 50 times at 10us intervals.
 *
 * Returns 0 on success or -EBUSY if the controller never cleared
 * START_BUSY.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
398
/* Mask the device interrupt; the read-back flushes the posted write
 * so the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Re-enable device interrupts: acknowledge up to the last seen status
 * index (first with MASK_INT still set, then unmasked), then force a
 * coalescing pass so any events that arrived while masked generate an
 * interrupt immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bnapi->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
421
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR (which presumably checks it)
 * ignores late events; paired with the decrement in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
429
/* Stop NAPI polling; blocks until any running poll completes. */
static void
bnx2_napi_disable(struct bnx2 *bp)
{
        napi_disable(&bp->bnx2_napi.napi);
}
435
/* Allow NAPI polling again; counterpart of bnx2_napi_disable(). */
static void
bnx2_napi_enable(struct bnx2 *bp)
{
        napi_enable(&bp->bnx2_napi.napi);
}
441
/* Quiesce the interface: disable interrupts (synchronously), stop
 * NAPI and the TX queue.  Resetting trans_start keeps the watchdog
 * from declaring a TX timeout while traffic is deliberately halted.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
452
/* Undo bnx2_netif_stop().  intr_sem counts nested stops; only the
 * final decrement (to zero) actually restarts TX, NAPI and interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                }
        }
}
464
465 static void
466 bnx2_free_mem(struct bnx2 *bp)
467 {
468         int i;
469
470         for (i = 0; i < bp->ctx_pages; i++) {
471                 if (bp->ctx_blk[i]) {
472                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
473                                             bp->ctx_blk[i],
474                                             bp->ctx_blk_mapping[i]);
475                         bp->ctx_blk[i] = NULL;
476                 }
477         }
478         if (bp->status_blk) {
479                 pci_free_consistent(bp->pdev, bp->status_stats_size,
480                                     bp->status_blk, bp->status_blk_mapping);
481                 bp->status_blk = NULL;
482                 bp->stats_blk = NULL;
483         }
484         if (bp->tx_desc_ring) {
485                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
486                                     bp->tx_desc_ring, bp->tx_desc_mapping);
487                 bp->tx_desc_ring = NULL;
488         }
489         kfree(bp->tx_buf_ring);
490         bp->tx_buf_ring = NULL;
491         for (i = 0; i < bp->rx_max_ring; i++) {
492                 if (bp->rx_desc_ring[i])
493                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
494                                             bp->rx_desc_ring[i],
495                                             bp->rx_desc_mapping[i]);
496                 bp->rx_desc_ring[i] = NULL;
497         }
498         vfree(bp->rx_buf_ring);
499         bp->rx_buf_ring = NULL;
500         for (i = 0; i < bp->rx_max_pg_ring; i++) {
501                 if (bp->rx_pg_desc_ring[i])
502                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
503                                             bp->rx_pg_desc_ring[i],
504                                             bp->rx_pg_desc_mapping[i]);
505                 bp->rx_pg_desc_ring[i] = NULL;
506         }
507         if (bp->rx_pg_ring)
508                 vfree(bp->rx_pg_ring);
509         bp->rx_pg_ring = NULL;
510 }
511
/* Allocate all software and DMA rings for the device: TX buffer/desc
 * rings, RX buffer/desc rings, optional RX page rings, the combined
 * status+statistics block, and (5709 only) the context memory pages.
 *
 * Any allocation failure unwinds everything already allocated via
 * bnx2_free_mem() and returns -ENOMEM; returns 0 on success.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* software RX ring can be large, so it is vmalloc'ed and
         * zeroed by hand (no vzalloc helper here) */
        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* RX page ring is optional (only when rx_pg_ring_size is set) */
        if (bp->rx_pg_ring_size) {
                bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                         bp->rx_max_pg_ring);
                if (bp->rx_pg_ring == NULL)
                        goto alloc_mem_err;

                memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                       bp->rx_max_pg_ring);
        }

        for (i = 0; i < bp->rx_max_pg_ring; i++) {
                bp->rx_pg_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_pg_desc_mapping[i]);
                if (bp->rx_pg_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        bp->bnx2_napi.status_blk = bp->status_blk;

        /* stats block lives immediately after the (cache-aligned)
         * status block within the same DMA allocation */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 8KB of context memory, split into page-sized chunks */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
597
/* Publish the current link state (speed/duplex/autoneg status) to the
 * bootcode via the shared memory BNX2_LINK_STATUS word.  Skipped when
 * a remote PHY is managing the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is latched; read twice to get the
                         * current state rather than a stale latch */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
656
657 static char *
658 bnx2_xceiver_str(struct bnx2 *bp)
659 {
660         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
661                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
662                  "Copper"));
663 }
664
/* Log the link state to the console and update the carrier flag, then
 * mirror the state to the bootcode via bnx2_report_fw_link().  The
 * message is built from several printk() continuation calls, so the
 * call order here determines the output text.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
701
/* Resolve the effective flow control (bp->flow_ctrl) after a link
 * event.
 *
 * If both speed and flow-control autoneg are not enabled, the
 * requested setting is applied directly (full duplex only).  On the
 * 5708 SerDes the resolved result is read straight from the hardware
 * status register.  Otherwise the local and partner advertisements
 * are compared; 1000X pause bits are first mapped onto the standard
 * PAUSE_CAP/PAUSE_ASYM encoding so one resolution table serves both
 * copper and fiber.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* pause is only defined for full duplex links */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
777
/* Link-up handler for the 5709 SerDes PHY: read the negotiated speed
 * and duplex from the GP_STATUS block (switching the block address
 * register there and back), or use the requested settings when speed
 * autoneg is off.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }
        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
                case MII_BNX2_GP_TOP_AN_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_1G:
                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                        bp->line_speed = SPEED_1000;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}
816
/* Link-up handler for the 5708 SerDes PHY: speed and duplex are read
 * directly from the 1000X_STAT1 hardware status register.  Always
 * returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
        u32 val;

        bp->link_up = 1;
        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
        switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
                case BCM5708S_1000X_STAT1_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_1G:
                        bp->line_speed = SPEED_1000;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_2G5:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & BCM5708S_1000X_STAT1_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;

        return 0;
}
845
846 static int
847 bnx2_5706s_linkup(struct bnx2 *bp)
848 {
849         u32 bmcr, local_adv, remote_adv, common;
850
851         bp->link_up = 1;
852         bp->line_speed = SPEED_1000;
853
854         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
855         if (bmcr & BMCR_FULLDPLX) {
856                 bp->duplex = DUPLEX_FULL;
857         }
858         else {
859                 bp->duplex = DUPLEX_HALF;
860         }
861
862         if (!(bmcr & BMCR_ANENABLE)) {
863                 return 0;
864         }
865
866         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
867         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
868
869         common = local_adv & remote_adv;
870         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
871
872                 if (common & ADVERTISE_1000XFULL) {
873                         bp->duplex = DUPLEX_FULL;
874                 }
875                 else {
876                         bp->duplex = DUPLEX_HALF;
877                 }
878         }
879
880         return 0;
881 }
882
/* Record link speed and duplex for a copper PHY after link-up.
 * When autoneg is enabled, resolves the highest common ability
 * (1000 -> 100 -> 10); otherwise decodes the forced BMCR bits.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Link-partner 1000BASE-T ability bits in MII_STAT1000
		 * sit two bit positions above the corresponding
		 * MII_CTRL1000 advertisement bits; shift to align them.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common gigabit ability; fall back to the
			 * 10/100 advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all: report no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
948
/* Program the EMAC to match the current link state: port mode
 * (MII/GMII/2.5G), duplex, and rx/tx flow control, then acknowledge
 * the link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths value; widened below for 1000 Mbps
	 * half-duplex.  (0x2620/0x26ff are raw BNX2_EMAC_TX_LENGTHS
	 * encodings - presumably IPG/slot-time; TODO confirm against
	 * chip documentation.)
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M MII mode. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1015
1016 static void
1017 bnx2_enable_bmsr1(struct bnx2 *bp)
1018 {
1019         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1020             (CHIP_NUM(bp) == CHIP_NUM_5709))
1021                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1022                                MII_BNX2_BLK_ADDR_GP_STATUS);
1023 }
1024
1025 static void
1026 bnx2_disable_bmsr1(struct bnx2 *bp)
1027 {
1028         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1029             (CHIP_NUM(bp) == CHIP_NUM_5709))
1030                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1031                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1032 }
1033
/* Make sure 2.5 Gbps is advertised on a 2.5G-capable SerDes PHY.
 *
 * Returns 1 if 2.5G was already enabled (or the PHY is not
 * 2.5G-capable... returns 0 in that case), 0 if the UP1 register had
 * to be changed - callers use a 0 return to force the link down so the
 * new advertisement takes effect.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On the 5709 the UP1 register is in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1062
/* Make sure 2.5 Gbps is NOT advertised on a 2.5G-capable SerDes PHY.
 *
 * Returns 1 if the UP1 register had to be changed (2.5G was enabled) -
 * callers use this to force the link down so the new advertisement
 * takes effect - and 0 otherwise.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	/* On the 5709 the UP1 register is in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1088
1089 static void
1090 bnx2_enable_forced_2g5(struct bnx2 *bp)
1091 {
1092         u32 bmcr;
1093
1094         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095                 return;
1096
1097         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098                 u32 val;
1099
1100                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1101                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1102                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1103                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1104                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1105                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1106
1107                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1108                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1109                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110
1111         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1112                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1113                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1114         }
1115
1116         if (bp->autoneg & AUTONEG_SPEED) {
1117                 bmcr &= ~BMCR_ANENABLE;
1118                 if (bp->req_duplex == DUPLEX_FULL)
1119                         bmcr |= BMCR_FULLDPLX;
1120         }
1121         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1122 }
1123
1124 static void
1125 bnx2_disable_forced_2g5(struct bnx2 *bp)
1126 {
1127         u32 bmcr;
1128
1129         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1130                 return;
1131
1132         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1133                 u32 val;
1134
1135                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1136                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1137                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1138                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1139                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1148         }
1149
1150         if (bp->autoneg & AUTONEG_SPEED)
1151                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1152         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1153 }
1154
/* Re-evaluate link state from the PHY, update bp->link_up / speed /
 * duplex / flow control, report any change, and reprogram the MAC.
 * Always returns 0.  Called with bp->phy_lock held.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* Loopback modes always report link up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware-managed PHY: link events arrive via
	 * bnx2_remote_phy_event() instead.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: the link-status bit is latched, so the
	 * first read clears a stale latch and the second reflects the
	 * current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On the 5706 SerDes, override the BMSR link bit with the EMAC
	 * link status.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode so autoneg can
		 * find a partner again.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1220
1221 static int
1222 bnx2_reset_phy(struct bnx2 *bp)
1223 {
1224         int i;
1225         u32 reg;
1226
1227         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1228
1229 #define PHY_RESET_MAX_WAIT 100
1230         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1231                 udelay(10);
1232
1233                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1234                 if (!(reg & BMCR_RESET)) {
1235                         udelay(20);
1236                         break;
1237                 }
1238         }
1239         if (i == PHY_RESET_MAX_WAIT) {
1240                 return -EBUSY;
1241         }
1242         return 0;
1243 }
1244
1245 static u32
1246 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1247 {
1248         u32 adv = 0;
1249
1250         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1251                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1252
1253                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254                         adv = ADVERTISE_1000XPAUSE;
1255                 }
1256                 else {
1257                         adv = ADVERTISE_PAUSE_CAP;
1258                 }
1259         }
1260         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1261                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1262                         adv = ADVERTISE_1000XPSE_ASYM;
1263                 }
1264                 else {
1265                         adv = ADVERTISE_PAUSE_ASYM;
1266                 }
1267         }
1268         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1269                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1270                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1271                 }
1272                 else {
1273                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1274                 }
1275         }
1276         return adv;
1277 }
1278
1279 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1280
1281 static int
1282 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1283 {
1284         u32 speed_arg = 0, pause_adv;
1285
1286         pause_adv = bnx2_phy_get_pause_adv(bp);
1287
1288         if (bp->autoneg & AUTONEG_SPEED) {
1289                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1290                 if (bp->advertising & ADVERTISED_10baseT_Half)
1291                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1292                 if (bp->advertising & ADVERTISED_10baseT_Full)
1293                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1294                 if (bp->advertising & ADVERTISED_100baseT_Half)
1295                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296                 if (bp->advertising & ADVERTISED_100baseT_Full)
1297                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1298                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1299                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1300                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1301                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1302         } else {
1303                 if (bp->req_line_speed == SPEED_2500)
1304                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1305                 else if (bp->req_line_speed == SPEED_1000)
1306                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1307                 else if (bp->req_line_speed == SPEED_100) {
1308                         if (bp->req_duplex == DUPLEX_FULL)
1309                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1310                         else
1311                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1312                 } else if (bp->req_line_speed == SPEED_10) {
1313                         if (bp->req_duplex == DUPLEX_FULL)
1314                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1315                         else
1316                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1317                 }
1318         }
1319
1320         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1321                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1322         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1323                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1324
1325         if (port == PORT_TP)
1326                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1327                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1328
1329         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1330
1331         spin_unlock_bh(&bp->phy_lock);
1332         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1333         spin_lock_bh(&bp->phy_lock);
1334
1335         return 0;
1336 }
1337
/* Apply the requested link settings to a SerDes PHY.
 *
 * Remote (firmware-managed) PHYs are delegated to
 * bnx2_setup_remote_phy().  Otherwise: forced-speed mode programs
 * BMCR/advertisement directly (bouncing the link when settings
 * changed), and autoneg mode writes the new advertisement and restarts
 * autonegotiation with a shortened poll timer.
 *
 * Called with bp->phy_lock held (dropped briefly around msleep).
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* If the 2.5G advertisement had to change, the link
		 * must be bounced for it to take effect.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is BMCR_SPEED100; with
				 * BMCR_SPEED1000 set and this bit clear
				 * the forced speed is 1000 Mbps.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing with autoneg
				 * on so the partner sees the link drop.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1452
/* Ethtool advertisement mask for every supported fibre speed; 2.5G is
 * included only on 2.5G-capable PHYs.  NOTE: evaluates 'bp' - usable
 * only where a 'struct bnx2 *bp' is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask for all copper speeds. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement bits for all 10/100 modes plus the 802.3 CSMA
 * selector field.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII_CTRL1000 advertisement bits for both gigabit duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1467
/* Initialize bp's requested link settings from the firmware's default
 * link configuration in shared memory (per-port: copper or serdes).
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg default: translate each firmware speed bit
		 * into the matching ethtool advertisement bit.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced default: later checks override earlier ones,
		 * so the highest configured speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1514
/* Initialize bp's requested link settings to sane defaults: full
 * autoneg, unless hardware config forces a SerDes port to 1G full
 * duplex.  Remote PHYs take their defaults from firmware instead.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* Hardware config may force the SerDes default link to
		 * 1G full duplex instead of autoneg.
		 */
		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1538
1539 static void
1540 bnx2_send_heart_beat(struct bnx2 *bp)
1541 {
1542         u32 msg;
1543         u32 addr;
1544
1545         spin_lock(&bp->indirect_lock);
1546         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1547         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1548         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1549         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1550         spin_unlock(&bp->indirect_lock);
1551 }
1552
/* Handle a link-status event from the firmware-managed (remote) PHY:
 * decode the shared-memory link status word into bp's link state,
 * resolve flow control, report link changes, and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware wants a heartbeat regardless of link state. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xHALF case sets duplex then deliberately falls
		 * through to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Without full speed+flow-control autoneg, the
		 * requested flow control applies (full duplex only);
		 * otherwise take the firmware's negotiated result.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware may have switched between serdes and
		 * copper; reload defaults if the port type changed.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1631
1632 static int
1633 bnx2_set_remote_link(struct bnx2 *bp)
1634 {
1635         u32 evt_code;
1636
1637         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1638         switch (evt_code) {
1639                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1640                         bnx2_remote_phy_event(bp);
1641                         break;
1642                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1643                 default:
1644                         bnx2_send_heart_beat(bp);
1645                         break;
1646         }
1647         return 0;
1648 }
1649
/* Configure the copper PHY from bp->autoneg / bp->advertising /
 * bp->req_line_speed / bp->req_duplex.
 *
 * NOTE(review): the forced-speed path drops and re-takes bp->phy_lock
 * with spin_unlock_bh()/spin_lock_bh() around msleep(), so the caller
 * is expected to hold phy_lock (bh-disabled) on entry -- confirm at
 * the call sites.
 *
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Read the current 10/100 advertisement, keeping only the
		 * speed and pause bits that this function manages.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Likewise for the 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		/* Pause bits derived from the requested flow control. */
		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite the advertisement and restart autoneg only if
		 * something changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex: build the desired BMCR value. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1746
1747 static int
1748 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1749 {
1750         if (bp->loopback == MAC_LOOPBACK)
1751                 return 0;
1752
1753         if (bp->phy_flags & PHY_SERDES_FLAG) {
1754                 return (bnx2_setup_serdes_phy(bp, port));
1755         }
1756         else {
1757                 return (bnx2_setup_copper_phy(bp));
1758         }
1759 }
1760
/* Initialize the 5709 SerDes PHY.  On this PHY the IEEE-standard MII
 * registers live at an offset of +0x10, so the mii_* register map is
 * redirected first; the remaining setup is done through the PHY's
 * block-address window (MII_BNX2_BLK_ADDR).
 * Always returns 0 (register writes are not checked).
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Redirect IEEE register accesses to the +0x10 offsets. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD, then reset via the
	 * combo IEEE block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode; disable media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the board is capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable clause 73 BAM autoneg. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1809
/* Initialize the 5708 SerDes PHY: reset, select IEEE-format registers,
 * enable fiber mode with auto-detect and PLL early link detect,
 * optionally advertise 2.5G, and apply board-specific TX amplitude
 * adjustments from shared memory.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE-format registers (DIG3 block). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable PLL early link detect. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G only on capable boards. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a stronger TX signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared memory; applied
	 * only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1867
/* Initialize the 5706 SerDes PHY.  Clears the parallel-detect flag and
 * programs the PHY's vendor-specific 0x18/0x1c registers differently
 * for jumbo (mtu > 1500) vs. standard frames.  The raw register values
 * are vendor-supplied magic -- TODO confirm their meaning against the
 * Broadcom reference.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	/* 5706-specific GP control setting (vendor value). */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1904
/* Initialize the copper PHY: reset, apply errata workarounds selected
 * by bp->phy_flags, set or clear the extended packet length bit
 * depending on MTU, and enable ethernet@wirespeed.  The raw 0x18/0x17/
 * 0x15/0x10 register sequences are vendor-supplied shadow/DSP accesses.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* DSP write sequence for the CRC erratum workaround
		 * (values from the vendor).
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Clear bit 8 of DSP expand register 8 to disable the
		 * early DAC.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1955
1956
/* Top-level PHY initialization: select the interrupt mode, install the
 * default MII register map, read the PHY id, run the chip-specific
 * SerDes or copper init, and finally configure the link via
 * bnx2_setup_phy().  When the PHY is managed by remote firmware
 * (REMOTE_PHY_CAP_FLAG), local PHY access is skipped entirely.
 * Returns 0 on success or the error from the init/setup helpers.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default (IEEE) MII register offsets; bnx2_init_5709s_phy()
	 * overrides these for the 5709 SerDes.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	/* Generate attentions on link changes only. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* PHY id: PHYSID1 in the high 16 bits, PHYSID2 in the low. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2000
2001 static int
2002 bnx2_set_mac_loopback(struct bnx2 *bp)
2003 {
2004         u32 mac_mode;
2005
2006         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2007         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2008         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2009         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010         bp->link_up = 1;
2011         return 0;
2012 }
2013
2014 static int bnx2_test_link(struct bnx2 *);
2015
2016 static int
2017 bnx2_set_phy_loopback(struct bnx2 *bp)
2018 {
2019         u32 mac_mode;
2020         int rc, i;
2021
2022         spin_lock_bh(&bp->phy_lock);
2023         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2024                             BMCR_SPEED1000);
2025         spin_unlock_bh(&bp->phy_lock);
2026         if (rc)
2027                 return rc;
2028
2029         for (i = 0; i < 10; i++) {
2030                 if (bnx2_test_link(bp) == 0)
2031                         break;
2032                 msleep(100);
2033         }
2034
2035         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2036         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2037                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2038                       BNX2_EMAC_MODE_25G_MODE);
2039
2040         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2041         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2042         bp->link_up = 1;
2043         return 0;
2044 }
2045
/* Post @msg_data to the bootcode mailbox and wait for the firmware to
 * acknowledge it.  A fresh sequence number (bp->fw_wr_seq) is or'ed
 * into the message; the firmware echoes it in BNX2_FW_MB when the
 * message has been consumed.  Polls for up to FW_ACK_TIME_OUT_MS.
 * Messages carrying BNX2_DRV_MSG_DATA_WAIT0 are fire-and-forget and
 * always return 0.
 *
 * Returns 0 on success, -EBUSY on ack timeout (after telling the
 * firmware a timeout occurred), -EIO if the firmware reported a non-OK
 * status.  @silent suppresses the timeout printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not error-checked. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2088
/* Initialize the 5709 on-chip context memory: trigger the hardware
 * MEM_INIT, wait for it to self-clear, then write the DMA address of
 * each host context page into the chip's host page table.
 * Returns 0, or -EBUSY if the hardware does not acknowledge within the
 * polling budget.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Bit 12 is a vendor-specified enable (meaning not documented
	 * here); bits 16+ encode log2(page size) - 8.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when initialization is done. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low and high halves of the page DMA address, then the
		 * write request for page-table entry i.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears when the entry is accepted. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2131
/* Zero the context memory for all 96 connection IDs through the
 * context view window (pre-5709 chips).
 *
 * On 5706 A0, CIDs with bit 3 set are remapped to a different physical
 * CID -- presumably a hardware erratum workaround; confirm against the
 * 5706 errata sheet.
 *
 * NOTE(review): vcid_addr/pcid_addr are advanced by (i << PHY_CTX_SHIFT)
 * cumulatively each iteration, which only steps one physical page per
 * iteration while CTX_SIZE / PHY_CTX_SIZE <= 2 -- confirm the ratio in
 * bnx2.h before changing either constant.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Map the physical context page into the view
			 * window before clearing it.
			 */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2174
/* Work around bad on-chip RX buffer memory: allocate every available
 * mbuf cluster from the chip, remember the good ones (bit 9 clear of
 * the returned value), then free only the good ones back.  The bad
 * clusters remain allocated and are therefore never handed to the RX
 * engine again.
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries; assumes the chip never reports more than 512
	 * allocatable clusters -- TODO confirm against the RBUF size.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Build the FW_BUF_FREE command word from the cluster
		 * number (duplicated in two fields, plus the valid bit).
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2225
2226 static void
2227 bnx2_set_mac_addr(struct bnx2 *bp)
2228 {
2229         u32 val;
2230         u8 *mac_addr = bp->dev->dev_addr;
2231
2232         val = (mac_addr[0] << 8) | mac_addr[1];
2233
2234         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2235
2236         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2237                 (mac_addr[4] << 8) | mac_addr[5];
2238
2239         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2240 }
2241
2242 static inline int
2243 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2244 {
2245         dma_addr_t mapping;
2246         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2247         struct rx_bd *rxbd =
2248                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2249         struct page *page = alloc_page(GFP_ATOMIC);
2250
2251         if (!page)
2252                 return -ENOMEM;
2253         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2254                                PCI_DMA_FROMDEVICE);
2255         rx_pg->page = page;
2256         pci_unmap_addr_set(rx_pg, mapping, mapping);
2257         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2258         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2259         return 0;
2260 }
2261
2262 static void
2263 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2264 {
2265         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2266         struct page *page = rx_pg->page;
2267
2268         if (!page)
2269                 return;
2270
2271         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2272                        PCI_DMA_FROMDEVICE);
2273
2274         __free_page(page);
2275         rx_pg->page = NULL;
2276 }
2277
/* Allocate and DMA-map an skb for RX ring slot @index and point the
 * corresponding BD at it.  skb->data is aligned up to BNX2_RX_ALIGN,
 * and bnapi->rx_prod_bseq (byte-sequence accounting for the hardware
 * producer) is advanced by the buffer size.
 * Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Push data up to the next BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* 64-bit DMA address split across the two BD halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2308
2309 static int
2310 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2311 {
2312         struct status_block *sblk = bnapi->status_blk;
2313         u32 new_link_state, old_link_state;
2314         int is_set = 1;
2315
2316         new_link_state = sblk->status_attn_bits & event;
2317         old_link_state = sblk->status_attn_bits_ack & event;
2318         if (new_link_state != old_link_state) {
2319                 if (new_link_state)
2320                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2321                 else
2322                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2323         } else
2324                 is_set = 0;
2325
2326         return is_set;
2327 }
2328
2329 static void
2330 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2331 {
2332         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2333                 spin_lock(&bp->phy_lock);
2334                 bnx2_set_link(bp);
2335                 spin_unlock(&bp->phy_lock);
2336         }
2337         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2338                 bnx2_set_remote_link(bp);
2339
2340 }
2341
2342 static inline u16
2343 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2344 {
2345         u16 cons;
2346
2347         cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2348
2349         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2350                 cons++;
2351         return cons;
2352 }
2353
/* Reclaim TX buffers completed by the hardware: walk the software
 * consumer index up to the hardware consumer index, unmapping and
 * freeing each skb, then wake the TX queue if it was stopped and
 * enough BDs were freed.
 */
static void
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's final BD; account for the
			 * next-page slot if the frags wrap the ring page.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the hardware has not yet consumed the
			 * whole packet (signed 16-bit ring comparison).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each fragment page. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read: the hardware may have completed more packets
		 * while we were reclaiming.
		 */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked under the tx lock to avoid racing a concurrent
	 * queue stop in bnx2_start_xmit().
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2433
/* Recycle @count page buffers on the RX page ring by moving pages from
 * consumer slots to producer slots (with their DMA mappings and BD
 * addresses).  If @skb is non-NULL, its last fragment page is stripped,
 * re-mapped into the first consumer slot, and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bnapi->rx_pg_prod, prod;
	u16 cons = bnapi->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			/* Detach the skb's last fragment page and park it
			 * in the consumer slot so it gets recycled below.
			 */
			struct page *page;
			struct skb_shared_info *shinfo;

			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move page, mapping and BD address from the
			 * consumer slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bnapi->rx_pg_prod = hw_prod;
	bnapi->rx_pg_cons = cons;
}
2483
2484 static inline void
2485 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2486         u16 cons, u16 prod)
2487 {
2488         struct sw_bd *cons_rx_buf, *prod_rx_buf;
2489         struct rx_bd *cons_bd, *prod_bd;
2490
2491         cons_rx_buf = &bp->rx_buf_ring[cons];
2492         prod_rx_buf = &bp->rx_buf_ring[prod];
2493
2494         pci_dma_sync_single_for_device(bp->pdev,
2495                 pci_unmap_addr(cons_rx_buf, mapping),
2496                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2497
2498         bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2499
2500         prod_rx_buf->skb = skb;
2501
2502         if (cons == prod)
2503                 return;
2504
2505         pci_unmap_addr_set(prod_rx_buf, mapping,
2506                         pci_unmap_addr(cons_rx_buf, mapping));
2507
2508         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2509         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2510         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2511         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2512 }
2513
/* Complete a received frame into @skb after the hardware has filled it.
 *
 * A replacement rx buffer is allocated first; on failure the current
 * buffer (and any page-ring entries the frame would have used) are
 * recycled and the packet is dropped.
 *
 * @len is the frame length excluding the 4 trailing bytes (the caller
 * has already subtracted them; raw length is len + 4).  @hdr_len is
 * non-zero when the frame continues into the page ring, in which case
 * the pages are attached to the skb as fragments.  @ring_idx packs the
 * consumer index in the high 16 bits and the producer index in the low
 * 16 bits.
 *
 * Returns 0 on success, or a negative error if an skb or page could not
 * be allocated (the packet is dropped and all buffers recycled).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, bnapi, prod);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* Also recycle the page-ring entries this frame
                         * would have consumed.
                         */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, bp->rx_offset);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Entire frame fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = bnapi->rx_pg_cons;
                u16 pg_prod = bnapi->rx_pg_prod;

                /* Remaining bytes live in the page ring; the trailing 4
                 * bytes are trimmed off the last fragment below.
                 */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        if (unlikely(frag_len <= 4)) {
                                /* The final fragment holds nothing but
                                 * (part of) the 4 trailing bytes that are
                                 * not passed up: trim what has already
                                 * been attached and recycle the rest of
                                 * the pages.
                                 */
                                unsigned int tail = 4 - frag_len;

                                bnapi->rx_pg_cons = pg_cons;
                                bnapi->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &bp->rx_pg_ring[pg_cons];

                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        /* Trim the 4 trailing bytes off the last page. */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                /* Roll the ring indices back and recycle
                                 * the remaining pages; @skb (including
                                 * the frag just attached) is freed by
                                 * bnx2_reuse_rx_skb_pages().
                                 */
                                bnapi->rx_pg_cons = pg_cons;
                                bnapi->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
                                                        pages - i);
                                return err;
                        }

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                bnapi->rx_pg_prod = pg_prod;
                bnapi->rx_pg_cons = pg_cons;
        }
        return 0;
}
2605
2606 static inline u16
2607 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2608 {
2609         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2610
2611         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2612                 cons++;
2613         return cons;
2614 }
2615
/* Service up to @budget received packets from the rx ring.
 *
 * For each completed descriptor, the l2_fhdr that the chip places at
 * the start of the buffer is inspected: errored frames are recycled and
 * dropped, small frames are copied into a fresh skb, and larger or
 * header-split frames are completed via bnx2_rx_skb() (which may attach
 * page-ring fragments).  Accepted skbs are handed to the stack.
 *
 * Called from NAPI poll context; returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = bnapi->rx_cons;
        sw_prod = bnapi->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Only the header + copy-threshold region is synced
                 * here; bnx2_rx_skb() unmaps the full buffer when it
                 * keeps the skb.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        /* Errored frame: recycle the buffer, drop it. */
                        bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
                                          sw_ring_prod);
                        goto next_rx;
                }
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        /* Header/data split: the rest of the frame is in
                         * the page ring.
                         */
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Strip the 4 trailing bytes of the hardware length. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        /* Small frame: copy into a fresh skb and recycle
                         * the original rx buffer.
                         */
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, bnapi, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless they are VLAN-tagged
                 * (0x8100 is the 802.1Q ethertype).
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Trust the hardware checksum only when rx_csum is on,
                 * the frame is TCP/UDP, and no checksum error bit is set.
                 */
                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        bnapi->rx_cons = sw_cons;
        bnapi->rx_prod = sw_prod;

        /* Tell the chip the new host producer/consumer positions. */
        if (pg_ring_used)
                REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
                         bnapi->rx_pg_prod);

        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
2761
2762 /* MSI ISR - The only difference between this and the INTx ISR
2763  * is that the MSI interrupt is always serviced.
2764  */
2765 static irqreturn_t
2766 bnx2_msi(int irq, void *dev_instance)
2767 {
2768         struct net_device *dev = dev_instance;
2769         struct bnx2 *bp = netdev_priv(dev);
2770         struct bnx2_napi *bnapi = &bp->bnx2_napi;
2771
2772         prefetch(bnapi->status_blk);
2773         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2774                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2775                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2776
2777         /* Return here if interrupt is disabled. */
2778         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2779                 return IRQ_HANDLED;
2780
2781         netif_rx_schedule(dev, &bnapi->napi);
2782
2783         return IRQ_HANDLED;
2784 }
2785
2786 static irqreturn_t
2787 bnx2_msi_1shot(int irq, void *dev_instance)
2788 {
2789         struct net_device *dev = dev_instance;
2790         struct bnx2 *bp = netdev_priv(dev);
2791         struct bnx2_napi *bnapi = &bp->bnx2_napi;
2792
2793         prefetch(bnapi->status_blk);
2794
2795         /* Return here if interrupt is disabled. */
2796         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2797                 return IRQ_HANDLED;
2798
2799         netif_rx_schedule(dev, &bnapi->napi);
2800
2801         return IRQ_HANDLED;
2802 }
2803
/* INTx interrupt handler (also used when the line may be shared).
 * Returns IRQ_NONE when the device does not appear to have raised the
 * interrupt, IRQ_HANDLED otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi;
        struct status_block *sblk = bnapi->status_blk;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Ack and mask further interrupts until NAPI polling is done. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Record the status index only if NAPI was not already
         * scheduled; otherwise the running poll owns last_status_idx.
         */
        if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bnapi->napi);
        }

        return IRQ_HANDLED;
}
2843
2844 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2845                                  STATUS_ATTN_BITS_TIMER_ABORT)
2846
2847 static inline int
2848 bnx2_has_work(struct bnx2_napi *bnapi)
2849 {
2850         struct bnx2 *bp = bnapi->bp;
2851         struct status_block *sblk = bp->status_blk;
2852
2853         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2854             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2855                 return 1;
2856
2857         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2858             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2859                 return 1;
2860
2861         return 0;
2862 }
2863
/* One round of NAPI work: service attention (link) events, reap tx
 * completions, then receive rx packets up to the remaining budget.
 * Returns the updated work_done count; only rx packets count against
 * the budget.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
        struct status_block *sblk = bnapi->status_blk;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        /* An event is pending when its bit differs between the raw and
         * the acknowledged attention words.
         */
        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
                bnx2_tx_int(bp, bnapi);

        if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

        return work_done;
}
2892
/* NAPI poll callback.  Repeats bnx2_poll_work() until either the budget
 * is exhausted (stay scheduled) or no work remains, in which case the
 * poll is completed and chip interrupts are re-enabled through
 * INT_ACK_CMD: one write for MSI, two for INTx (the first updates the
 * index with interrupts still masked, the second unmasks).
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

                if (unlikely(work_done >= budget))
                        break;

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        netif_rx_complete(bp->dev, napi);
                        if (likely(bp->flags & USING_MSI_FLAG)) {
                                /* MSI: one ack write re-enables. */
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: update the index while still masked,
                         * then unmask with a second write.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
2934
2935 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2936  * from set_multicast.
2937  */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with the bits managed here
         * cleared; they are re-added below as needed.
         */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep (do not strip) VLAN tags when neither VLAN acceleration
         * nor ASF management firmware is in use.
         */
        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash filter bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Low byte of the little-endian CRC selects the
                         * filter bit: bits 7:5 pick the hash register,
                         * bits 4:0 the bit within it.
                         */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Only touch the EMAC mode register when something changed. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Program the sort rules: disable, load, then re-enable. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3009
3010 static void
3011 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3012         u32 rv2p_proc)
3013 {
3014         int i;
3015         u32 val;
3016
3017
3018         for (i = 0; i < rv2p_code_len; i += 8) {
3019                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
3020                 rv2p_code++;
3021                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
3022                 rv2p_code++;
3023
3024                 if (rv2p_proc == RV2P_PROC1) {
3025                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3026                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3027                 }
3028                 else {
3029                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3030                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3031                 }
3032         }
3033
3034         /* Reset the processor, un-stall is done later. */
3035         if (rv2p_proc == RV2P_PROC1) {
3036                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3037         }
3038         else {
3039                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3040         }
3041 }
3042
3043 static int
3044 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3045 {
3046         u32 offset;
3047         u32 val;
3048         int rc;
3049
3050         /* Halt the CPU. */
3051         val = REG_RD_IND(bp, cpu_reg->mode);
3052         val |= cpu_reg->mode_value_halt;
3053         REG_WR_IND(bp, cpu_reg->mode, val);
3054         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3055
3056         /* Load the Text area. */
3057         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3058         if (fw->gz_text) {
3059                 int j;
3060
3061                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3062                                        fw->gz_text_len);
3063                 if (rc < 0)
3064                         return rc;
3065
3066                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3067                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3068                 }
3069         }
3070
3071         /* Load the Data area. */
3072         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3073         if (fw->data) {
3074                 int j;
3075
3076                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3077                         REG_WR_IND(bp, offset, fw->data[j]);
3078                 }
3079         }
3080
3081         /* Load the SBSS area. */
3082         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3083         if (fw->sbss_len) {
3084                 int j;
3085
3086                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3087                         REG_WR_IND(bp, offset, 0);
3088                 }
3089         }
3090
3091         /* Load the BSS area. */
3092         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3093         if (fw->bss_len) {
3094                 int j;
3095
3096                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3097                         REG_WR_IND(bp, offset, 0);
3098                 }
3099         }
3100
3101         /* Load the Read-Only area. */
3102         offset = cpu_reg->spad_base +
3103                 (fw->rodata_addr - cpu_reg->mips_view_base);
3104         if (fw->rodata) {
3105                 int j;
3106
3107                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3108                         REG_WR_IND(bp, offset, fw->rodata[j]);
3109                 }
3110         }
3111
3112         /* Clear the pre-fetch instruction. */
3113         REG_WR_IND(bp, cpu_reg->inst, 0);
3114         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3115
3116         /* Start the CPU. */
3117         val = REG_RD_IND(bp, cpu_reg->mode);
3118         val &= ~cpu_reg->mode_value_halt;
3119         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3120         REG_WR_IND(bp, cpu_reg->mode, val);
3121
3122         return 0;
3123 }
3124
/* Load firmware into the RV2P engine (two processors) and the five
 * internal CPUs (RXP, TXP, TPAT, COM, CP) and start them.  A single
 * vmalloc'ed scratch buffer of FW_BUF_SIZE is reused to hold each
 * decompressed image in turn.  The 5709 chips use a different firmware
 * set than the rest.  Returns 0 on success or a negative error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        struct cpu_reg cpu_reg;
        struct fw_info *fw;
        int rc, rv2p_len;
        void *text, *rv2p;

        /* Initialize the RV2P processor. */
        text = vmalloc(FW_BUF_SIZE);
        if (!text)
                return -ENOMEM;
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rv2p = bnx2_xi_rv2p_proc1;
                rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
        } else {
                rv2p = bnx2_rv2p_proc1;
                rv2p_len = sizeof(bnx2_rv2p_proc1);
        }
        /* zlib_inflate_blob() returns the decompressed length. */
        rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
        if (rc < 0)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rv2p = bnx2_xi_rv2p_proc2;
                rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
        } else {
                rv2p = bnx2_rv2p_proc2;
                rv2p_len = sizeof(bnx2_rv2p_proc2);
        }
        rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
        if (rc < 0)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

        /* Initialize the RX Processor. */
        cpu_reg.mode = BNX2_RXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_RXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_RXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_rxp_fw_09;
        else
                fw = &bnx2_rxp_fw_06;

        /* fw->text points at the shared scratch buffer; load_cpu_fw()
         * inflates gz_text into it.
         */
        fw->text = text;
        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        cpu_reg.mode = BNX2_TXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_txp_fw_09;
        else
                fw = &bnx2_txp_fw_06;

        fw->text = text;
        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        cpu_reg.mode = BNX2_TPAT_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TPAT_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_tpat_fw_09;
        else
                fw = &bnx2_tpat_fw_06;

        fw->text = text;
        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        cpu_reg.mode = BNX2_COM_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_COM_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_COM_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_com_fw_09;
        else
                fw = &bnx2_com_fw_06;

        fw->text = text;
        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor. */
        cpu_reg.mode = BNX2_CP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_CP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_CP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_cp_fw_09;
        else
                fw = &bnx2_cp_fw_06;

        fw->text = text;
        rc = load_cpu_fw(bp, &cpu_reg, fw);

init_cpu_err:
        /* Scratch buffer is freed on all paths, success or error. */
        vfree(text);
        return rc;
}
3285
/* Transition the device between PCI power states.  Only D0 (fully
 * powered) and D3hot (suspended, optionally armed for Wake-on-LAN)
 * are supported; any other target state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	/* Current power-management control/status word from config space. */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Force the state field to D0 and clear any pending PME
		 * status (PME_STATUS is write-1-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any magic/ACPI wake packets received while asleep and
		 * leave magic-packet (WoL) receive mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Turn off ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Renegotiate copper links down to 10/100 for the
			 * low-power state; the user's autoneg/advertising
			 * settings are saved and restored afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while suspended so
			 * a wake packet can reach us; the sort rule is
			 * cleared, written, then enabled in that order.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode firmware we are suspending, unless WoL
		 * is entirely unsupported on this board.
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (state bits = 3) when WoL is
		 * armed; otherwise the state field is left at D0.
		 * NOTE(review): presumably a chip erratum — confirm against
		 * the 5706 errata sheet.  All other chips always enter D3hot.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3422
3423 static int
3424 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3425 {
3426         u32 val;
3427         int j;
3428
3429         /* Request access to the flash interface. */
3430         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3431         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3432                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3433                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3434                         break;
3435
3436                 udelay(5);
3437         }
3438
3439         if (j >= NVRAM_TIMEOUT_COUNT)
3440                 return -EBUSY;
3441
3442         return 0;
3443 }
3444
3445 static int
3446 bnx2_release_nvram_lock(struct bnx2 *bp)
3447 {
3448         int j;
3449         u32 val;
3450
3451         /* Relinquish nvram interface. */
3452         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3453
3454         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3455                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3456                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3457                         break;
3458
3459                 udelay(5);
3460         }
3461
3462         if (j >= NVRAM_TIMEOUT_COUNT)
3463                 return -EBUSY;
3464
3465         return 0;
3466 }
3467
3468
3469 static int
3470 bnx2_enable_nvram_write(struct bnx2 *bp)
3471 {
3472         u32 val;
3473
3474         val = REG_RD(bp, BNX2_MISC_CFG);
3475         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3476
3477         if (bp->flash_info->flags & BNX2_NV_WREN) {
3478                 int j;
3479
3480                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3481                 REG_WR(bp, BNX2_NVM_COMMAND,
3482                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3483
3484                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3485                         udelay(5);
3486
3487                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3488                         if (val & BNX2_NVM_COMMAND_DONE)
3489                                 break;
3490                 }
3491
3492                 if (j >= NVRAM_TIMEOUT_COUNT)
3493                         return -EBUSY;
3494         }
3495         return 0;
3496 }
3497
3498 static void
3499 bnx2_disable_nvram_write(struct bnx2 *bp)
3500 {
3501         u32 val;
3502
3503         val = REG_RD(bp, BNX2_MISC_CFG);
3504         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3505 }
3506
3507
3508 static void
3509 bnx2_enable_nvram_access(struct bnx2 *bp)
3510 {
3511         u32 val;
3512
3513         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3514         /* Enable both bits, even on read. */
3515         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3516                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3517 }
3518
3519 static void
3520 bnx2_disable_nvram_access(struct bnx2 *bp)
3521 {
3522         u32 val;
3523
3524         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3525         /* Disable both bits, even after read. */
3526         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3527                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3528                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3529 }
3530
3531 static int
3532 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3533 {
3534         u32 cmd;
3535         int j;
3536
3537         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3538                 /* Buffered flash, no erase needed */
3539                 return 0;
3540
3541         /* Build an erase command */
3542         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3543               BNX2_NVM_COMMAND_DOIT;
3544
3545         /* Need to clear DONE bit separately. */
3546         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3547
3548         /* Address of the NVRAM to read from. */
3549         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3550
3551         /* Issue an erase command. */
3552         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3553
3554         /* Wait for completion. */
3555         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3556                 u32 val;
3557
3558                 udelay(5);
3559
3560                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3561                 if (val & BNX2_NVM_COMMAND_DONE)
3562                         break;
3563         }
3564
3565         if (j >= NVRAM_TIMEOUT_COUNT)
3566                 return -EBUSY;
3567
3568         return 0;
3569 }
3570
3571 static int
3572 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3573 {
3574         u32 cmd;
3575         int j;
3576
3577         /* Build the command word. */
3578         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3579
3580         /* Calculate an offset of a buffered flash, not needed for 5709. */
3581         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3582                 offset = ((offset / bp->flash_info->page_size) <<
3583                            bp->flash_info->page_bits) +
3584                           (offset % bp->flash_info->page_size);
3585         }
3586
3587         /* Need to clear DONE bit separately. */
3588         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3589
3590         /* Address of the NVRAM to read from. */
3591         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3592
3593         /* Issue a read command. */
3594         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3595
3596         /* Wait for completion. */
3597         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3598                 u32 val;
3599
3600                 udelay(5);
3601
3602                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3603                 if (val & BNX2_NVM_COMMAND_DONE) {
3604                         val = REG_RD(bp, BNX2_NVM_READ);
3605
3606                         val = be32_to_cpu(val);
3607                         memcpy(ret_val, &val, 4);
3608                         break;
3609                 }
3610         }
3611         if (j >= NVRAM_TIMEOUT_COUNT)
3612                 return -EBUSY;
3613
3614         return 0;
3615 }
3616
3617
3618 static int
3619 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3620 {
3621         u32 cmd, val32;
3622         int j;
3623
3624         /* Build the command word. */
3625         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3626
3627         /* Calculate an offset of a buffered flash, not needed for 5709. */
3628         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3629                 offset = ((offset / bp->flash_info->page_size) <<
3630                           bp->flash_info->page_bits) +
3631                          (offset % bp->flash_info->page_size);
3632         }
3633
3634         /* Need to clear DONE bit separately. */
3635         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3636
3637         memcpy(&val32, val, 4);
3638         val32 = cpu_to_be32(val32);
3639
3640         /* Write the data. */
3641         REG_WR(bp, BNX2_NVM_WRITE, val32);
3642
3643         /* Address of the NVRAM to write to. */
3644         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3645
3646         /* Issue the write command. */
3647         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3648
3649         /* Wait for completion. */
3650         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3651                 udelay(5);
3652
3653                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3654                         break;
3655         }
3656         if (j >= NVRAM_TIMEOUT_COUNT)
3657                 return -EBUSY;
3658
3659         return 0;
3660 }
3661
/* Identify the attached flash/EEPROM part, record it in
 * bp->flash_info, and determine the usable flash size.  On pre-5709
 * chips the part is found by matching strap bits against flash_table;
 * if the interface has not yet been reconfigured, the matching
 * entry's config registers are programmed into the NVM block.
 * Returns 0 on success, -ENODEV for an unrecognized part, or the
 * error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a single fixed flash layout; skip strap detection. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 of NVM_CFG1 indicates the interface was already set up. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field encodes the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched the strapping: unsupported part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared hw config; fall back to the
	 * table entry's total_size when the firmware leaves it zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3744
3745 static int
3746 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3747                 int buf_size)
3748 {
3749         int rc = 0;
3750         u32 cmd_flags, offset32, len32, extra;
3751
3752         if (buf_size == 0)
3753                 return 0;
3754
3755         /* Request access to the flash interface. */
3756         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3757                 return rc;
3758
3759         /* Enable access to flash interface */
3760         bnx2_enable_nvram_access(bp);
3761
3762         len32 = buf_size;
3763         offset32 = offset;
3764         extra = 0;
3765
3766         cmd_flags = 0;
3767
3768         if (offset32 & 3) {
3769                 u8 buf[4];
3770                 u32 pre_len;
3771
3772                 offset32 &= ~3;
3773                 pre_len = 4 - (offset & 3);
3774
3775                 if (pre_len >= len32) {
3776                         pre_len = len32;
3777                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3778                                     BNX2_NVM_COMMAND_LAST;
3779                 }
3780                 else {
3781                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3782                 }
3783
3784                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3785
3786                 if (rc)
3787                         return rc;
3788
3789                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3790
3791                 offset32 += 4;
3792                 ret_buf += pre_len;
3793                 len32 -= pre_len;
3794         }
3795         if (len32 & 3) {
3796                 extra = 4 - (len32 & 3);
3797                 len32 = (len32 + 4) & ~3;
3798         }
3799
3800         if (len32 == 4) {
3801                 u8 buf[4];
3802
3803                 if (cmd_flags)
3804                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3805                 else
3806                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3807                                     BNX2_NVM_COMMAND_LAST;
3808
3809                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3810
3811                 memcpy(ret_buf, buf, 4 - extra);
3812         }
3813         else if (len32 > 0) {
3814                 u8 buf[4];
3815
3816                 /* Read the first word. */
3817                 if (cmd_flags)
3818                         cmd_flags = 0;
3819                 else
3820                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3821
3822                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3823
3824                 /* Advance to the next dword. */
3825                 offset32 += 4;
3826                 ret_buf += 4;
3827                 len32 -= 4;
3828
3829                 while (len32 > 4 && rc == 0) {
3830                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3831
3832                         /* Advance to the next dword. */
3833                         offset32 += 4;
3834                         ret_buf += 4;
3835                         len32 -= 4;
3836                 }
3837
3838                 if (rc)
3839                         return rc;
3840
3841                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3842                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3843
3844                 memcpy(ret_buf, buf, 4 - extra);
3845         }
3846
3847         /* Disable access to flash interface */
3848         bnx2_disable_nvram_access(bp);
3849
3850         bnx2_release_nvram_lock(bp);
3851
3852         return rc;
3853 }
3854
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * Unaligned head/tail bytes are handled by reading the surrounding
 * dwords first and merging into a temporary buffer; non-buffered
 * flash parts additionally require a read-erase-rewrite of each whole
 * page touched.  The NVRAM lock is acquired and released once per
 * page.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths inside the per-page loop jump to
 * nvram_write_end without disabling access or releasing the NVRAM
 * lock — same leak pattern fixed in bnx2_nvram_read; confirm and fix
 * separately (not changed here to keep this audit behavior-neutral).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range down to a dword boundary and
	 * fetch the dword we are about to partially overwrite.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen up to a dword boundary and fetch the
	 * trailing dword likewise.
	 */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved head/tail bytes with the caller's data into a
	 * fully dword-aligned scratch buffer.
	 */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered parts need a page-sized bounce buffer for the
	 * read-erase-rewrite cycle.  264 bytes covers the page size of
	 * the supported parts — presumably 256+8; TODO confirm against
	 * flash_table.
	 */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* One iteration per flash page touched by the request. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4034
4035 static void
4036 bnx2_init_remote_phy(struct bnx2 *bp)
4037 {
4038         u32 val;
4039
4040         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4041         if (!(bp->phy_flags & PHY_SERDES_FLAG))
4042                 return;
4043
4044         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4045         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4046                 return;
4047
4048         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4049                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4050
4051                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4052                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4053                         bp->phy_port = PORT_FIBRE;
4054                 else
4055                         bp->phy_port = PORT_TP;
4056
4057                 if (netif_running(bp->dev)) {
4058                         u32 sig;
4059
4060                         if (val & BNX2_LINK_STATUS_LINK_UP) {
4061                                 bp->link_up = 1;
4062                                 netif_carrier_on(bp->dev);
4063                         } else {
4064                                 bp->link_up = 0;
4065                                 netif_carrier_off(bp->dev);
4066                         }
4067                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4068                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4069                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4070                                    sig);
4071                 }
4072         }
4073 }
4074
/* Perform a chip core reset and re-synchronize with the bootcode.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value telling the firmware why the
 *	driver is resetting.
 *
 * Returns 0 on success, -EBUSY if the core reset does not complete,
 * -ENODEV if the chip comes back in the wrong endian mode, or the
 * error code from the firmware handshake.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* The 5709 is reset through the MISC_COMMAND register
		 * rather than the PCI config register used below. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	/* The reset may have changed the PHY port; re-read the remote
	 * PHY capability and, if the port moved, pick up the default
	 * link settings for the new port. */
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
4176
/* Bring the chip to an operational state after a reset: program the
 * DMA engine, context memory, internal CPUs, MAC address, MTU, the
 * status/statistics block addresses, and host coalescing, then enable
 * all blocks and tell the firmware initialization is done.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the relaxed-ordering (ERO) bit in the PCI-X
		 * command register. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->bnx2_napi.last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Give the hardware the DMA addresses of the status and
	 * statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing trip points and timers; the upper 16 bits
	 * carry the *_int (in-interrupt) variants. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; the read flushes the posted write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4354
4355 static void
4356 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4357 {
4358         u32 val, offset0, offset1, offset2, offset3;
4359
4360         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4361                 offset0 = BNX2_L2CTX_TYPE_XI;
4362                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4363                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4364                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4365         } else {
4366                 offset0 = BNX2_L2CTX_TYPE;
4367                 offset1 = BNX2_L2CTX_CMD_TYPE;
4368                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4369                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4370         }
4371         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4372         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4373
4374         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4375         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4376
4377         val = (u64) bp->tx_desc_mapping >> 32;
4378         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4379
4380         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4381         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4382 }
4383
4384 static void
4385 bnx2_init_tx_ring(struct bnx2 *bp)
4386 {
4387         struct tx_bd *txbd;
4388         u32 cid;
4389         struct bnx2_napi *bnapi = &bp->bnx2_napi;
4390
4391         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4392
4393         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4394
4395         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4396         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4397
4398         bp->tx_prod = 0;
4399         bnapi->tx_cons = 0;
4400         bnapi->hw_tx_cons = 0;
4401         bp->tx_prod_bseq = 0;
4402
4403         cid = TX_CID;
4404         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4405         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4406
4407         bnx2_init_tx_context(bp, cid);
4408 }
4409
4410 static void
4411 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4412                      int num_rings)
4413 {
4414         int i;
4415         struct rx_bd *rxbd;
4416
4417         for (i = 0; i < num_rings; i++) {
4418                 int j;
4419
4420                 rxbd = &rx_ring[i][0];
4421                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4422                         rxbd->rx_bd_len = buf_size;
4423                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4424                 }
4425                 if (i == (num_rings - 1))
4426                         j = 0;
4427                 else
4428                         j = i + 1;
4429                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4430                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4431         }
4432 }
4433
/* Program the RX (and optional page/jumbo) ring contexts and fill both
 * rings with receive buffers.  Relies on the sizes computed earlier in
 * bnx2_set_rx_ring_size(). */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	/* Reset software producer/consumer state. */
	bnapi->rx_prod = 0;
	bnapi->rx_cons = 0;
	bnapi->rx_prod_bseq = 0;
	bnapi->rx_pg_prod = 0;
	bnapi->rx_pg_cons = 0;

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	/* Page buffer size 0 by default; overwritten below if the page
	 * ring is in use. */
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		/* DMA address of the first page-BD ring page. */
		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* RX context type and size. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	/* DMA address of the first normal RX BD ring page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the page ring; a failed allocation leaves it partially
	 * filled and the loop stops early. */
	ring_prod = prod = bnapi->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bnapi->rx_pg_prod = prod;

	/* Fill the normal RX ring with skbs, same early-stop policy. */
	ring_prod = prod = bnapi->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bnapi->rx_prod = prod;

	/* Publish the producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
		 bnapi->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4507
4508 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4509 {
4510         u32 max, num_rings = 1;
4511
4512         while (ring_size > MAX_RX_DESC_CNT) {
4513                 ring_size -= MAX_RX_DESC_CNT;
4514                 num_rings++;
4515         }
4516         /* round to next power of 2 */
4517         max = max_size;
4518         while ((max & num_rings) == 0)
4519                 max >>= 1;
4520
4521         if (num_rings != max)
4522                 max <<= 1;
4523
4524         return max;
4525 }
4526
/* Compute all RX buffer and ring sizing parameters for the requested
 * ring @size at the current MTU.  When the required buffer would not
 * fit in a single page, a separate page ("jumbo") ring is configured
 * and the first buffer is shrunk to the copy threshold.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

	/* Total skb footprint including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if (rx_space > PAGE_SIZE) {
		/* NOTE(review): the "- 40" presumably discounts header
		 * bytes kept in the first buffer — confirm intent. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* First buffer only needs to hold up to the copy
		 * threshold; the rest of the frame goes to pages. */
		rx_size = RX_COPY_THRESH + bp->rx_offset;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4565
4566 static void
4567 bnx2_free_tx_skbs(struct bnx2 *bp)
4568 {
4569         int i;
4570
4571         if (bp->tx_buf_ring == NULL)
4572                 return;
4573
4574         for (i = 0; i < TX_DESC_CNT; ) {
4575                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4576                 struct sk_buff *skb = tx_buf->skb;
4577                 int j, last;
4578
4579                 if (skb == NULL) {
4580                         i++;
4581                         continue;
4582                 }
4583
4584                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4585                         skb_headlen(skb), PCI_DMA_TODEVICE);
4586
4587                 tx_buf->skb = NULL;
4588
4589                 last = skb_shinfo(skb)->nr_frags;
4590                 for (j = 0; j < last; j++) {
4591                         tx_buf = &bp->tx_buf_ring[i + j + 1];
4592                         pci_unmap_page(bp->pdev,
4593                                 pci_unmap_addr(tx_buf, mapping),
4594                                 skb_shinfo(skb)->frags[j].size,
4595                                 PCI_DMA_TODEVICE);
4596                 }
4597                 dev_kfree_skb(skb);
4598                 i += j + 1;
4599         }
4600
4601 }
4602
4603 static void
4604 bnx2_free_rx_skbs(struct bnx2 *bp)
4605 {
4606         int i;
4607
4608         if (bp->rx_buf_ring == NULL)
4609                 return;
4610
4611         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4612                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4613                 struct sk_buff *skb = rx_buf->skb;
4614
4615                 if (skb == NULL)
4616                         continue;
4617
4618                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4619                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4620
4621                 rx_buf->skb = NULL;
4622
4623                 dev_kfree_skb(skb);
4624         }
4625         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4626                 bnx2_free_rx_page(bp, i);
4627 }
4628
/* Free all skbs still held by the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4635
4636 static int
4637 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4638 {
4639         int rc;
4640
4641         rc = bnx2_reset_chip(bp, reset_code);
4642         bnx2_free_skbs(bp);
4643         if (rc)
4644                 return rc;
4645
4646         if ((rc = bnx2_init_chip(bp)) != 0)
4647                 return rc;
4648
4649         bnx2_init_tx_ring(bp);
4650         bnx2_init_rx_ring(bp);
4651         return 0;
4652 }
4653
4654 static int
4655 bnx2_init_nic(struct bnx2 *bp)
4656 {
4657         int rc;
4658
4659         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4660                 return rc;
4661
4662         spin_lock_bh(&bp->phy_lock);
4663         bnx2_init_phy(bp);
4664         bnx2_set_link(bp);
4665         spin_unlock_bh(&bp->phy_lock);
4666         return 0;
4667 }
4668
/* Self-test for selected chip registers.  For each table entry, write
 * 0 and then 0xffffffff and verify that read/write bits (rw_mask)
 * accept the written value while read-only bits (ro_mask) retain the
 * saved value.  Entries flagged BNX2_FL_NOT_5709 are skipped on the
 * 5709.  The original register value is restored in all cases.
 * Returns 0 on success or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, bits writable by the host, bits read-only } */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel entry terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read back 0, ro bits
		 * must keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits
		 * must still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4839
4840 static int
4841 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4842 {
4843         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4844                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4845         int i;
4846
4847         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4848                 u32 offset;
4849
4850                 for (offset = 0; offset < size; offset += 4) {
4851
4852                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4853
4854                         if (REG_RD_IND(bp, start + offset) !=
4855                                 test_pattern[i]) {
4856                                 return -ENODEV;
4857                         }
4858                 }
4859         }
4860         return 0;
4861 }
4862
4863 static int
4864 bnx2_test_memory(struct bnx2 *bp)
4865 {
4866         int ret = 0;
4867         int i;
4868         static struct mem_entry {
4869                 u32   offset;
4870                 u32   len;
4871         } mem_tbl_5706[] = {
4872                 { 0x60000,  0x4000 },
4873                 { 0xa0000,  0x3000 },
4874                 { 0xe0000,  0x4000 },
4875                 { 0x120000, 0x4000 },
4876                 { 0x1a0000, 0x4000 },
4877                 { 0x160000, 0x4000 },
4878                 { 0xffffffff, 0    },
4879         },
4880         mem_tbl_5709[] = {
4881                 { 0x60000,  0x4000 },
4882                 { 0xa0000,  0x3000 },
4883                 { 0xe0000,  0x4000 },
4884                 { 0x120000, 0x4000 },
4885                 { 0x1a0000, 0x4000 },
4886                 { 0xffffffff, 0    },
4887         };
4888         struct mem_entry *mem_tbl;
4889
4890         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4891                 mem_tbl = mem_tbl_5709;
4892         else
4893                 mem_tbl = mem_tbl_5706;
4894
4895         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4896                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4897                         mem_tbl[i].len)) != 0) {
4898                         return ret;
4899                 }
4900         }
4901
4902         return ret;
4903 }
4904
/* Loopback-mode selectors for bnx2_run_loopback(). */
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Self-test helper: transmit one frame with the chip looped back at the
 * MAC or the PHY and verify the same frame comes back on the rx ring
 * intact.  Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM
 * if no skb could be allocated, and -ENODEV for any tx/rx mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback cannot be run when a remote PHY is in
		 * control; report success so the self-test is not failed
		 * for an untestable mode.
		 */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: destination = our own MAC address,
	 * 8 zero bytes, then an incrementing byte pattern that is
	 * verified after the loopback receive.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so the rx consumer index can be
	 * snapshotted before the frame is sent.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx BD describing the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then force another
	 * status block update to pick up the new consumer indices.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The transmit must have completed... */
	if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written l2 frame header precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged by the chip fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length must match; the extra 4 bytes are presumably
	 * the appended FCS -- consistent with rx_jumbo_thresh - 4 above.
	 */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5029
5030 #define BNX2_MAC_LOOPBACK_FAILED        1
5031 #define BNX2_PHY_LOOPBACK_FAILED        2
5032 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5033                                          BNX2_PHY_LOOPBACK_FAILED)
5034
5035 static int
5036 bnx2_test_loopback(struct bnx2 *bp)
5037 {
5038         int rc = 0;
5039
5040         if (!netif_running(bp->dev))
5041                 return BNX2_LOOPBACK_FAILED;
5042
5043         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5044         spin_lock_bh(&bp->phy_lock);
5045         bnx2_init_phy(bp);
5046         spin_unlock_bh(&bp->phy_lock);
5047         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5048                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5049         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5050                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5051         return rc;
5052 }
5053
5054 #define NVRAM_SIZE 0x200
5055 #define CRC32_RESIDUAL 0xdebb20e3
5056
5057 static int
5058 bnx2_test_nvram(struct bnx2 *bp)
5059 {
5060         u32 buf[NVRAM_SIZE / 4];
5061         u8 *data = (u8 *) buf;
5062         int rc = 0;
5063         u32 magic, csum;
5064
5065         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5066                 goto test_nvram_done;
5067
5068         magic = be32_to_cpu(buf[0]);
5069         if (magic != 0x669955aa) {
5070                 rc = -ENODEV;
5071                 goto test_nvram_done;
5072         }
5073
5074         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5075                 goto test_nvram_done;
5076
5077         csum = ether_crc_le(0x100, data);
5078         if (csum != CRC32_RESIDUAL) {
5079                 rc = -ENODEV;
5080                 goto test_nvram_done;
5081         }
5082
5083         csum = ether_crc_le(0x100, data + 0x100);
5084         if (csum != CRC32_RESIDUAL) {
5085                 rc = -ENODEV;
5086         }
5087
5088 test_nvram_done:
5089         return rc;
5090 }
5091
5092 static int
5093 bnx2_test_link(struct bnx2 *bp)
5094 {
5095         u32 bmsr;
5096
5097         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5098                 if (bp->link_up)
5099                         return 0;
5100                 return -ENODEV;
5101         }
5102         spin_lock_bh(&bp->phy_lock);
5103         bnx2_enable_bmsr1(bp);
5104         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5105         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5106         bnx2_disable_bmsr1(bp);
5107         spin_unlock_bh(&bp->phy_lock);
5108
5109         if (bmsr & BMSR_LSTATUS) {
5110                 return 0;
5111         }
5112         return -ENODEV;
5113 }
5114
5115 static int
5116 bnx2_test_intr(struct bnx2 *bp)
5117 {
5118         int i;
5119         u16 status_idx;
5120
5121         if (!netif_running(bp->dev))
5122                 return -ENODEV;
5123
5124         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5125
5126         /* This register is not touched during run-time. */
5127         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5128         REG_RD(bp, BNX2_HC_COMMAND);
5129
5130         for (i = 0; i < 10; i++) {
5131                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5132                         status_idx) {
5133
5134                         break;
5135                 }
5136
5137                 msleep_interruptible(10);
5138         }
5139         if (i < 10)
5140                 return 0;
5141
5142         return -ENODEV;
5143 }
5144
/* Per-tick SerDes handling for the 5706: software parallel detect.
 * While autoneg has not brought the link up, if a signal is detected
 * without config-word exchange, force 1000/full.  If a link that was
 * forced this way later sees config data, re-enable autonegotiation.
 * serdes_an_pending delays this logic for a number of timer ticks.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Registers 0x1c/0x17/0x15 are presumably
			 * vendor-specific shadow selectors/status; the
			 * bit meanings are per the comments below.
			 * phy2 is read twice -- NOTE(review): looks like
			 * a latched-status read, confirm against PHY
			 * documentation.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1000/full and remember we did so.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up via parallel detect; if the partner now
		 * sends config data, go back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5199
/* Per-tick SerDes handling for the 5708 (2.5G-capable links only):
 * while the link is down with autoneg enabled, alternate between
 * autonegotiation and forced 2.5G so that link partners that do not
 * autonegotiate can still be brought up.  No-op when a remote PHY is
 * in control; non-2.5G links just clear the pending counter.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not bring the link up: try forced
			 * 2.5G for SERDES_FORCED_TIMEOUT.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode also failed: go back to autoneg
			 * and give it two timer ticks before retrying.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5232
/* Periodic driver timer: sends the firmware keep-alive, refreshes the
 * firmware rx-drop counter in the stats block, applies a 5708
 * statistics workaround, and drives the SerDes link state machines.
 * Always re-arms itself while the interface is up.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep the timer running) while interrupts
	 * are held off via intr_sem.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5263
5264 static int
5265 bnx2_request_irq(struct bnx2 *bp)
5266 {
5267         struct net_device *dev = bp->dev;
5268         unsigned long flags;
5269         struct bnx2_irq *irq = &bp->irq_tbl[0];
5270         int rc;
5271
5272         if (bp->flags & USING_MSI_FLAG)
5273                 flags = 0;
5274         else
5275                 flags = IRQF_SHARED;
5276         rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
5277         return rc;
5278 }
5279
5280 static void
5281 bnx2_free_irq(struct bnx2 *bp)
5282 {
5283         struct net_device *dev = bp->dev;
5284
5285         free_irq(bp->irq_tbl[0].vector, dev);
5286         if (bp->flags & USING_MSI_FLAG) {
5287                 pci_disable_msi(bp->pdev);
5288                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5289         }
5290 }
5291
5292 static void
5293 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5294 {
5295         bp->irq_tbl[0].handler = bnx2_interrupt;
5296         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5297
5298         if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
5299                 if (pci_enable_msi(bp->pdev) == 0) {
5300                         bp->flags |= USING_MSI_FLAG;
5301                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5302                                 bp->flags |= ONE_SHOT_MSI_FLAG;
5303                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5304                         } else
5305                                 bp->irq_tbl[0].handler = bnx2_msi;
5306                 }
5307         }
5308
5309         bp->irq_tbl[0].vector = bp->pdev->irq;
5310 }
5311
/* Called with rtnl_lock */
/* net_device open hook: power the chip up, allocate rings, set up the
 * interrupt mode, request the IRQ and initialize the NIC.  If MSI is
 * in use, verify that an MSI interrupt actually arrives and fall back
 * to INTx if it does not.  Each failure path unwinds exactly the
 * resources acquired so far.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Re-run interrupt setup with MSI forced off,
			 * then re-init the NIC and re-request the IRQ.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5393
/* Work-queue handler that stops the interface, re-initializes the NIC
 * and restarts it (scheduled from bnx2_tx_timeout()).  in_reset_task
 * lets bnx2_close() spin-wait for a reset in progress instead of
 * flushing the workqueue, which could deadlock on rtnl_lock.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is set non-zero before restarting;
	 * presumably bnx2_netif_start() re-enables interrupts and
	 * clears it -- confirm against that helper.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5411
/* net_device tx_timeout hook: defer the full chip reset to process
 * context via the reset_task work item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5420
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: install the new vlan group and reprogram the
 * rx mode with the interface quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5436
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Hard-start-xmit: map the skb (linear part plus page fragments) onto
 * consecutive tx BDs, encode checksum/VLAN/TSO information into the BD
 * flags, and ring the tx doorbell.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	/* The queue should have been stopped before the ring got this
	 * full; reaching here indicates a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO frame: encode MSS, TCP option length and (for
		 * IPv6) the transport-header offset into the BD fields.
		 */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* tcp_off is the extra offset beyond the basic
			 * IPv6 header (i.e. extension headers).
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Spread the 8-byte-unit offset across
				 * the BD flag bits and the mss field.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: the headers are rewritten below, so
			 * a cloned header area must be un-shared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed the TCP pseudo-header checksum; the chip
			 * completes the per-segment checksums.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First BD carries the linear part; the skb pointer is kept on
	 * this slot for completion handling.
	 */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full, but re-check afterwards in
	 * case completions freed descriptors in the meantime and wake
	 * it back up.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5577
/* Called with rtnl_lock */
/* net_device stop hook: quiesce interrupts, NAPI and the timer, reset
 * the chip with a firmware code reflecting the wake-on-LAN policy,
 * then release IRQ, buffers and memory and drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Tell the firmware why we are going down (WoL policy). */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5610
/* Read a 64-bit hardware counter exported as separate _hi/_lo 32-bit
 * words.  On 64-bit kernels the halves are combined; on 32-bit kernels
 * only the low word is reported.  The whole expansion is parenthesized
 * so the macros behave as single expressions at any call site (the
 * previous GET_NET_STATS64 left the '+' exposed, so e.g. multiplying
 * or comparing its result would have parsed incorrectly).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	  (unsigned long) (ctr##_lo)))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5623
/* net_device get_stats hook: translate the chip's DMA'd statistics
 * block into struct net_device_stats.  If the stats block has not been
 * allocated yet, the (zero/stale) cached structure is returned as-is.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* GET_NET_STATS merges the _hi/_lo counter halves on 64-bit
	 * kernels and reports only the low word on 32-bit kernels.
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 / 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5699
5700 /* All ethtool functions called with rtnl_lock */
5701
5702 static int
5703 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5704 {
5705         struct bnx2 *bp = netdev_priv(dev);
5706         int support_serdes = 0, support_copper = 0;
5707
5708         cmd->supported = SUPPORTED_Autoneg;
5709         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5710                 support_serdes = 1;
5711                 support_copper = 1;
5712         } else if (bp->phy_port == PORT_FIBRE)
5713                 support_serdes = 1;
5714         else
5715                 support_copper = 1;
5716
5717         if (support_serdes) {
5718                 cmd->supported |= SUPPORTED_1000baseT_Full |
5719                         SUPPORTED_FIBRE;
5720                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5721                         cmd->supported |= SUPPORTED_2500baseX_Full;
5722
5723         }
5724         if (support_copper) {
5725                 cmd->supported |= SUPPORTED_10baseT_Half |
5726                         SUPPORTED_10baseT_Full |
5727                         SUPPORTED_100baseT_Half |
5728                         SUPPORTED_100baseT_Full |
5729                         SUPPORTED_1000baseT_Full |
5730                         SUPPORTED_TP;
5731
5732         }
5733
5734         spin_lock_bh(&bp->phy_lock);
5735         cmd->port = bp->phy_port;
5736         cmd->advertising = bp->advertising;
5737
5738         if (bp->autoneg & AUTONEG_SPEED) {
5739                 cmd->autoneg = AUTONEG_ENABLE;
5740         }
5741         else {
5742                 cmd->autoneg = AUTONEG_DISABLE;
5743         }
5744
5745         if (netif_carrier_ok(dev)) {
5746                 cmd->speed = bp->line_speed;
5747                 cmd->duplex = bp->duplex;
5748         }
5749         else {
5750                 cmd->speed = -1;
5751                 cmd->duplex = -1;
5752         }
5753         spin_unlock_bh(&bp->phy_lock);
5754
5755         cmd->transceiver = XCVR_INTERNAL;
5756         cmd->phy_address = bp->phy_addr;
5757
5758         return 0;
5759 }
5760
/* ethtool set_settings hook (rtnl held): validate the requested
 * port/autoneg/speed/duplex combination against the hardware's
 * capabilities, then apply it through bnx2_setup_phy().  On any
 * invalid combination the driver state is left untouched and -EINVAL
 * is returned.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching between TP and FIBRE is only possible when a
	 * remote PHY is in control.
	 */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 advertising is copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable fibre port. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise every speed the
			 * port type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex: fibre allows only 1000/full or
		 * 2500/full (the latter on 2.5G-capable parts); copper
		 * rejects forced gigabit and above.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5843
5844 static void
5845 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5846 {
5847         struct bnx2 *bp = netdev_priv(dev);
5848
5849         strcpy(info->driver, DRV_MODULE_NAME);
5850         strcpy(info->version, DRV_MODULE_VERSION);
5851         strcpy(info->bus_info, pci_name(bp->pdev));
5852         strcpy(info->fw_version, bp->fw_version);
5853 }
5854
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: the register dump produced below is a
 * fixed 32 KB window, independent of the device instance.
 */
static int bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5862
5863 static void
5864 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5865 {
5866         u32 *p = _p, i, offset;
5867         u8 *orig_p = _p;
5868         struct bnx2 *bp = netdev_priv(dev);
5869         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5870                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5871                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5872                                  0x1040, 0x1048, 0x1080, 0x10a4,
5873                                  0x1400, 0x1490, 0x1498, 0x14f0,
5874                                  0x1500, 0x155c, 0x1580, 0x15dc,
5875                                  0x1600, 0x1658, 0x1680, 0x16d8,
5876                                  0x1800, 0x1820, 0x1840, 0x1854,
5877                                  0x1880, 0x1894, 0x1900, 0x1984,
5878                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5879                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5880                                  0x2000, 0x2030, 0x23c0, 0x2400,
5881                                  0x2800, 0x2820, 0x2830, 0x2850,
5882                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5883                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5884                                  0x4080, 0x4090, 0x43c0, 0x4458,
5885                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5886                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5887                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5888                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5889                                  0x6800, 0x6848, 0x684c, 0x6860,
5890                                  0x6888, 0x6910, 0x8000 };
5891
5892         regs->version = 0;
5893
5894         memset(p, 0, BNX2_REGDUMP_LEN);
5895
5896         if (!netif_running(bp->dev))
5897                 return;
5898
5899         i = 0;
5900         offset = reg_boundaries[0];
5901         p += offset;
5902         while (offset < BNX2_REGDUMP_LEN) {
5903                 *p++ = REG_RD(bp, offset);
5904                 offset += 4;
5905                 if (offset == reg_boundaries[i + 1]) {
5906                         offset = reg_boundaries[i + 2];
5907                         p = (u32 *) (orig_p + offset);
5908                         i += 2;
5909                 }
5910         }
5911 }
5912
5913 static void
5914 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5915 {
5916         struct bnx2 *bp = netdev_priv(dev);
5917
5918         if (bp->flags & NO_WOL_FLAG) {
5919                 wol->supported = 0;
5920                 wol->wolopts = 0;
5921         }
5922         else {
5923                 wol->supported = WAKE_MAGIC;
5924                 if (bp->wol)
5925                         wol->wolopts = WAKE_MAGIC;
5926                 else
5927                         wol->wolopts = 0;
5928         }
5929         memset(&wol->sopass, 0, sizeof(wol->sopass));
5930 }
5931
5932 static int
5933 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5934 {
5935         struct bnx2 *bp = netdev_priv(dev);
5936
5937         if (wol->wolopts & ~WAKE_MAGIC)
5938                 return -EINVAL;
5939
5940         if (wol->wolopts & WAKE_MAGIC) {
5941                 if (bp->flags & NO_WOL_FLAG)
5942                         return -EINVAL;
5943
5944                 bp->wol = 1;
5945         }
5946         else {
5947                 bp->wol = 0;
5948         }
5949         return 0;
5950 }
5951
/* ethtool nway_reset hook: restart autonegotiation on the PHY.
 * Only valid when autoneg speed selection is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote (management-firmware-controlled) PHY: delegate the
	 * restart to the remote-PHY setup path and return its status.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be
		 * called with a BH spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout so the timer can fall
		 * back if the restarted negotiation does not complete.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear the loopback bit written above and kick off a fresh
	 * autonegotiation cycle.
	 */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5994
5995 static int
5996 bnx2_get_eeprom_len(struct net_device *dev)
5997 {
5998         struct bnx2 *bp = netdev_priv(dev);
5999
6000         if (bp->flash_info == NULL)
6001                 return 0;
6002
6003         return (int) bp->flash_size;
6004 }
6005
6006 static int
6007 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6008                 u8 *eebuf)
6009 {
6010         struct bnx2 *bp = netdev_priv(dev);
6011         int rc;
6012
6013         /* parameters already validated in ethtool_get_eeprom */
6014
6015         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6016
6017         return rc;
6018 }
6019
6020 static int
6021 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6022                 u8 *eebuf)
6023 {
6024         struct bnx2 *bp = netdev_priv(dev);
6025         int rc;
6026
6027         /* parameters already validated in ethtool_set_eeprom */
6028
6029         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6030
6031         return rc;
6032 }
6033
6034 static int
6035 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6036 {
6037         struct bnx2 *bp = netdev_priv(dev);
6038
6039         memset(coal, 0, sizeof(struct ethtool_coalesce));
6040
6041         coal->rx_coalesce_usecs = bp->rx_ticks;
6042         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6043         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6044         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6045
6046         coal->tx_coalesce_usecs = bp->tx_ticks;
6047         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6048         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6049         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6050
6051         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6052
6053         return 0;
6054 }
6055
6056 static int
6057 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6058 {
6059         struct bnx2 *bp = netdev_priv(dev);
6060
6061         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6062         if (bp->rx_ticks > 0x3ff) bp->rx_ticks&nbs