[BNX2]: Enable S/G for jumbo RX.
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Size of the buffer used to hold decompressed firmware images (64 KiB). */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* printk message prefix */
#define DRV_MODULE_VERSION      "1.6.9"
#define DRV_MODULE_RELDATE      "December 8, 2007"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board types supported by this driver.  The enumerators are used both as
 * indices into board_info[] below and as the driver_data value in
 * bnx2_pci_tbl[], so the two tables must stay in the same order.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* indexed by board_t, above */
static const struct {
        char *name;     /* human-readable adapter name printed at probe */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI ID table.  The HP OEM entries (specific subsystem vendor/device IDs)
 * are listed before the generic PCI_ANY_ID entries for the same device so
 * that they match first; the last field is the board_t driver_data.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* Table of NVRAM devices the controller may be strapped to.  The first
 * word of each entry is matched against the chip's flash strapping value
 * at probe time to select the entry.
 * NOTE(review): the five leading hex words appear to be the strapping
 * value followed by NVRAM controller config/command register values —
 * confirm the exact field names against struct flash_spec in bnx2.h.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 has a single fixed NVRAM configuration, so it uses this
 * dedicated spec instead of strapping-based lookup in flash_table[].
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bp->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Indirectly write a device register through the PCICFG register window.
 * The address must be set and the data written under the same hold of
 * indirect_lock, since the window is shared state.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register @reg over the MDIO bus into *@val.
 * Returns 0 on success or -EBUSY if the MDIO transaction does not
 * complete within ~500 us (in which case *val is set to 0).
 * If hardware auto-polling of the PHY is enabled, it is turned off for
 * the duration of the access and restored afterwards, since it shares
 * the MDIO interface.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back, presumably to flush the posted write before
                 * the settling delay — standard PCI practice.
                 */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the read: PHY address, register, READ command, BUSY bit. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10 us for START_BUSY to self-clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the data field of the result. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
349
/* Write @val to PHY register @reg over the MDIO bus.
 * Returns 0 on success or -EBUSY if the transaction does not complete
 * within ~500 us.  Mirrors bnx2_read_phy(): hardware auto-polling is
 * suspended around the access because it shares the MDIO interface.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the write: PHY address, register, data, WRITE command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10 us for START_BUSY to self-clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
398
/* Mask device interrupts.  The trailing read flushes the posted write so
 * the mask is guaranteed to have reached the device on return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask device interrupts and ack up to the last seen status index.
 * The first write acks while still masked, the second unmasks; finally
 * COAL_NOW forces an immediate coalescing pass so any events that arrived
 * while masked generate an interrupt.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
419
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is incremented first so the ISR (and bnx2_netif_start) will
 * not re-enable interrupts underneath us; synchronize_irq() then waits
 * out any handler already running on another CPU.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
427
/* Quiesce the interface: interrupts off (synchronously), NAPI polling
 * disabled, TX queue stopped.  Undone by bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                napi_disable(&bp->napi);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
438
439 static void
440 bnx2_netif_start(struct bnx2 *bp)
441 {
442         if (atomic_dec_and_test(&bp->intr_sem)) {
443                 if (netif_running(bp->dev)) {
444                         netif_wake_queue(bp->dev);
445                         napi_enable(&bp->napi);
446                         bnx2_enable_int(bp);
447                 }
448         }
449 }
450
451 static void
452 bnx2_free_mem(struct bnx2 *bp)
453 {
454         int i;
455
456         for (i = 0; i < bp->ctx_pages; i++) {
457                 if (bp->ctx_blk[i]) {
458                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459                                             bp->ctx_blk[i],
460                                             bp->ctx_blk_mapping[i]);
461                         bp->ctx_blk[i] = NULL;
462                 }
463         }
464         if (bp->status_blk) {
465                 pci_free_consistent(bp->pdev, bp->status_stats_size,
466                                     bp->status_blk, bp->status_blk_mapping);
467                 bp->status_blk = NULL;
468                 bp->stats_blk = NULL;
469         }
470         if (bp->tx_desc_ring) {
471                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
472                                     bp->tx_desc_ring, bp->tx_desc_mapping);
473                 bp->tx_desc_ring = NULL;
474         }
475         kfree(bp->tx_buf_ring);
476         bp->tx_buf_ring = NULL;
477         for (i = 0; i < bp->rx_max_ring; i++) {
478                 if (bp->rx_desc_ring[i])
479                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
480                                             bp->rx_desc_ring[i],
481                                             bp->rx_desc_mapping[i]);
482                 bp->rx_desc_ring[i] = NULL;
483         }
484         vfree(bp->rx_buf_ring);
485         bp->rx_buf_ring = NULL;
486         for (i = 0; i < bp->rx_max_pg_ring; i++) {
487                 if (bp->rx_pg_desc_ring[i])
488                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
489                                             bp->rx_pg_desc_ring[i],
490                                             bp->rx_pg_desc_mapping[i]);
491                 bp->rx_pg_desc_ring[i] = NULL;
492         }
493         if (bp->rx_pg_ring)
494                 vfree(bp->rx_pg_ring);
495         bp->rx_pg_ring = NULL;
496 }
497
/* Allocate all rings and blocks needed by the device.
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem() (goto-cleanup pattern).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Software TX bookkeeping ring (zeroed). */
        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        /* Hardware TX descriptor ring (DMA-coherent). */
        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* Software RX ring; vmalloc because it can be large, then zeroed. */
        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        /* One DMA-coherent descriptor page per RX ring. */
        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* RX page rings, only used when pages are configured (S/G for
         * jumbo RX); rx_pg_ring_size == 0 skips the software ring.
         */
        if (bp->rx_pg_ring_size) {
                bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                         bp->rx_max_pg_ring);
                if (bp->rx_pg_ring == NULL)
                        goto alloc_mem_err;

                memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                       bp->rx_max_pg_ring);
        }

        for (i = 0; i < bp->rx_max_pg_ring; i++) {
                bp->rx_pg_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_pg_desc_mapping[i]);
                if (bp->rx_pg_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats_blk points into the combined allocation, right after the
         * cache-aligned status block.
         */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        /* 5709 needs 8 KiB of host-resident context memory, allocated in
         * page-sized DMA-coherent chunks.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
581
/* Report the current link state to the bootcode/management firmware via
 * the shared-memory BNX2_LINK_STATUS word.  Skipped entirely when a
 * remote PHY is in control, since the firmware owns the link then.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode speed + duplex. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice; presumably because the link
                         * status bit is latched and the first read returns
                         * the stale latched value — TODO confirm.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
640
641 static char *
642 bnx2_xceiver_str(struct bnx2 *bp)
643 {
644         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
645                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
646                  "Copper"));
647 }
648
649 static void
650 bnx2_report_link(struct bnx2 *bp)
651 {
652         if (bp->link_up) {
653                 netif_carrier_on(bp->dev);
654                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
655                        bnx2_xceiver_str(bp));
656
657                 printk("%d Mbps ", bp->line_speed);
658
659                 if (bp->duplex == DUPLEX_FULL)
660                         printk("full duplex");
661                 else
662                         printk("half duplex");
663
664                 if (bp->flow_ctrl) {
665                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
666                                 printk(", receive ");
667                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
668                                         printk("& transmit ");
669                         }
670                         else {
671                                 printk(", transmit ");
672                         }
673                         printk("flow control ON");
674                 }
675                 printk("\n");
676         }
677         else {
678                 netif_carrier_off(bp->dev);
679                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
680                        bnx2_xceiver_str(bp));
681         }
682
683         bnx2_report_fw_link(bp);
684 }
685
/* Determine the effective flow-control settings (bp->flow_ctrl) after a
 * link comes up.  If pause autonegotiation is not fully enabled, the
 * user-requested settings apply (full duplex only); otherwise the result
 * is resolved from the local and link-partner advertisements per
 * IEEE 802.3 Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* The 5708 SerDes PHY reports the resolved pause result directly
         * in a status register, so no advertisement arithmetic is needed.
         */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* SerDes uses the 1000Base-X pause bits; translate them to the
         * copper ADVERTISE_PAUSE_* encoding so one resolution path below
         * handles both.
         */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
761
762 static int
763 bnx2_5709s_linkup(struct bnx2 *bp)
764 {
765         u32 val, speed;
766
767         bp->link_up = 1;
768
769         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
770         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
771         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
772
773         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
774                 bp->line_speed = bp->req_line_speed;
775                 bp->duplex = bp->req_duplex;
776                 return 0;
777         }
778         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
779         switch (speed) {
780                 case MII_BNX2_GP_TOP_AN_SPEED_10:
781                         bp->line_speed = SPEED_10;
782                         break;
783                 case MII_BNX2_GP_TOP_AN_SPEED_100:
784                         bp->line_speed = SPEED_100;
785                         break;
786                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
787                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
788                         bp->line_speed = SPEED_1000;
789                         break;
790                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
791                         bp->line_speed = SPEED_2500;
792                         break;
793         }
794         if (val & MII_BNX2_GP_TOP_AN_FD)
795                 bp->duplex = DUPLEX_FULL;
796         else
797                 bp->duplex = DUPLEX_HALF;
798         return 0;
799 }
800
801 static int
802 bnx2_5708s_linkup(struct bnx2 *bp)
803 {
804         u32 val;
805
806         bp->link_up = 1;
807         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
808         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
809                 case BCM5708S_1000X_STAT1_SPEED_10:
810                         bp->line_speed = SPEED_10;
811                         break;
812                 case BCM5708S_1000X_STAT1_SPEED_100:
813                         bp->line_speed = SPEED_100;
814                         break;
815                 case BCM5708S_1000X_STAT1_SPEED_1G:
816                         bp->line_speed = SPEED_1000;
817                         break;
818                 case BCM5708S_1000X_STAT1_SPEED_2G5:
819                         bp->line_speed = SPEED_2500;
820                         break;
821         }
822         if (val & BCM5708S_1000X_STAT1_FD)
823                 bp->duplex = DUPLEX_FULL;
824         else
825                 bp->duplex = DUPLEX_HALF;
826
827         return 0;
828 }
829
830 static int
831 bnx2_5706s_linkup(struct bnx2 *bp)
832 {
833         u32 bmcr, local_adv, remote_adv, common;
834
835         bp->link_up = 1;
836         bp->line_speed = SPEED_1000;
837
838         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839         if (bmcr & BMCR_FULLDPLX) {
840                 bp->duplex = DUPLEX_FULL;
841         }
842         else {
843                 bp->duplex = DUPLEX_HALF;
844         }
845
846         if (!(bmcr & BMCR_ANENABLE)) {
847                 return 0;
848         }
849
850         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
851         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
852
853         common = local_adv & remote_adv;
854         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
855
856                 if (common & ADVERTISE_1000XFULL) {
857                         bp->duplex = DUPLEX_FULL;
858                 }
859                 else {
860                         bp->duplex = DUPLEX_HALF;
861                 }
862         }
863
864         return 0;
865 }
866
867 static int
868 bnx2_copper_linkup(struct bnx2 *bp)
869 {
870         u32 bmcr;
871
872         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
873         if (bmcr & BMCR_ANENABLE) {
874                 u32 local_adv, remote_adv, common;
875
876                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
877                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
878
879                 common = local_adv & (remote_adv >> 2);
880                 if (common & ADVERTISE_1000FULL) {
881                         bp->line_speed = SPEED_1000;
882                         bp->duplex = DUPLEX_FULL;
883                 }
884                 else if (common & ADVERTISE_1000HALF) {
885                         bp->line_speed = SPEED_1000;
886                         bp->duplex = DUPLEX_HALF;
887                 }
888                 else {
889                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
890                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
891
892                         common = local_adv & remote_adv;
893                         if (common & ADVERTISE_100FULL) {
894                                 bp->line_speed = SPEED_100;
895                                 bp->duplex = DUPLEX_FULL;
896                         }
897                         else if (common & ADVERTISE_100HALF) {
898                                 bp->line_speed = SPEED_100;
899                                 bp->duplex = DUPLEX_HALF;
900                         }
901                         else if (common & ADVERTISE_10FULL) {
902                                 bp->line_speed = SPEED_10;
903                                 bp->duplex = DUPLEX_FULL;
904                         }
905                         else if (common & ADVERTISE_10HALF) {
906                                 bp->line_speed = SPEED_10;
907                                 bp->duplex = DUPLEX_HALF;
908                         }
909                         else {
910                                 bp->line_speed = 0;
911                                 bp->link_up = 0;
912                         }
913                 }
914         }
915         else {
916                 if (bmcr & BMCR_SPEED100) {
917                         bp->line_speed = SPEED_100;
918                 }
919                 else {
920                         bp->line_speed = SPEED_10;
921                 }
922                 if (bmcr & BMCR_FULLDPLX) {
923                         bp->duplex = DUPLEX_FULL;
924                 }
925                 else {
926                         bp->duplex = DUPLEX_HALF;
927                 }
928         }
929
930         return 0;
931 }
932
/* Program the EMAC to match the software link state (speed, duplex,
 * flow control) resolved by the *_linkup helpers.  Also acknowledges the
 * EMAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 / 0x26ff look like default vs. 1G-half-duplex
	 * inter-packet gap / slot time settings — confirm against chip docs. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	/* Clear all port/duplex/loopback/force bits before re-deriving them. */
	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII port mode; the 5706
				 * falls through to plain MII. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through: 2.5G also uses GMII port mode */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: default the port mode to GMII. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
999
1000 static void
1001 bnx2_enable_bmsr1(struct bnx2 *bp)
1002 {
1003         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1004             (CHIP_NUM(bp) == CHIP_NUM_5709))
1005                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1006                                MII_BNX2_BLK_ADDR_GP_STATUS);
1007 }
1008
1009 static void
1010 bnx2_disable_bmsr1(struct bnx2 *bp)
1011 {
1012         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1013             (CHIP_NUM(bp) == CHIP_NUM_5709))
1014                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1015                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1016 }
1017
1018 static int
1019 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1020 {
1021         u32 up1;
1022         int ret = 1;
1023
1024         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1025                 return 0;
1026
1027         if (bp->autoneg & AUTONEG_SPEED)
1028                 bp->advertising |= ADVERTISED_2500baseX_Full;
1029
1030         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1031                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1032
1033         bnx2_read_phy(bp, bp->mii_up1, &up1);
1034         if (!(up1 & BCM5708S_UP1_2G5)) {
1035                 up1 |= BCM5708S_UP1_2G5;
1036                 bnx2_write_phy(bp, bp->mii_up1, up1);
1037                 ret = 0;
1038         }
1039
1040         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1041                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1042                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1043
1044         return ret;
1045 }
1046
1047 static int
1048 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1049 {
1050         u32 up1;
1051         int ret = 0;
1052
1053         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1054                 return 0;
1055
1056         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1057                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1058
1059         bnx2_read_phy(bp, bp->mii_up1, &up1);
1060         if (up1 & BCM5708S_UP1_2G5) {
1061                 up1 &= ~BCM5708S_UP1_2G5;
1062                 bnx2_write_phy(bp, bp->mii_up1, up1);
1063                 ret = 1;
1064         }
1065
1066         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1067                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1068                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1069
1070         return ret;
1071 }
1072
1073 static void
1074 bnx2_enable_forced_2g5(struct bnx2 *bp)
1075 {
1076         u32 bmcr;
1077
1078         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1079                 return;
1080
1081         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1082                 u32 val;
1083
1084                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1086                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1088                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1089                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1098         }
1099
1100         if (bp->autoneg & AUTONEG_SPEED) {
1101                 bmcr &= ~BMCR_ANENABLE;
1102                 if (bp->req_duplex == DUPLEX_FULL)
1103                         bmcr |= BMCR_FULLDPLX;
1104         }
1105         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1106 }
1107
1108 static void
1109 bnx2_disable_forced_2g5(struct bnx2 *bp)
1110 {
1111         u32 bmcr;
1112
1113         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1114                 return;
1115
1116         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1117                 u32 val;
1118
1119                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1120                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1121                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1122                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1123                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1124
1125                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1126                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1127                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1128
1129         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1130                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1131                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1132         }
1133
1134         if (bp->autoneg & AUTONEG_SPEED)
1135                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1136         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1137 }
1138
/* Poll the PHY and refresh software link state (bp->link_up, line_speed,
 * duplex, flow_ctrl), report a state change, and reprogram the MAC.
 * Caller holds phy_lock.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* Loopback modes force the link up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware manages the link when the PHY is remote. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: link status is latched-low per MII convention,
	 * so the first read clears a stale latch and the second reflects
	 * the current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* On 5706 SerDes, override the BMSR link bit with the
		 * EMAC's own link status. */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting so autoneg can
		 * renegotiate from scratch. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log/report when the up/down state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1204
1205 static int
1206 bnx2_reset_phy(struct bnx2 *bp)
1207 {
1208         int i;
1209         u32 reg;
1210
1211         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1212
1213 #define PHY_RESET_MAX_WAIT 100
1214         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1215                 udelay(10);
1216
1217                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1218                 if (!(reg & BMCR_RESET)) {
1219                         udelay(20);
1220                         break;
1221                 }
1222         }
1223         if (i == PHY_RESET_MAX_WAIT) {
1224                 return -EBUSY;
1225         }
1226         return 0;
1227 }
1228
1229 static u32
1230 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1231 {
1232         u32 adv = 0;
1233
1234         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1235                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1236
1237                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1238                         adv = ADVERTISE_1000XPAUSE;
1239                 }
1240                 else {
1241                         adv = ADVERTISE_PAUSE_CAP;
1242                 }
1243         }
1244         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1245                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1246                         adv = ADVERTISE_1000XPSE_ASYM;
1247                 }
1248                 else {
1249                         adv = ADVERTISE_PAUSE_ASYM;
1250                 }
1251         }
1252         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1253                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1255                 }
1256                 else {
1257                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1258                 }
1259         }
1260         return adv;
1261 }
1262
1263 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1264
1265 static int
1266 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1267 {
1268         u32 speed_arg = 0, pause_adv;
1269
1270         pause_adv = bnx2_phy_get_pause_adv(bp);
1271
1272         if (bp->autoneg & AUTONEG_SPEED) {
1273                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1274                 if (bp->advertising & ADVERTISED_10baseT_Half)
1275                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1276                 if (bp->advertising & ADVERTISED_10baseT_Full)
1277                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1278                 if (bp->advertising & ADVERTISED_100baseT_Half)
1279                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1280                 if (bp->advertising & ADVERTISED_100baseT_Full)
1281                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1282                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1283                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1284                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1285                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1286         } else {
1287                 if (bp->req_line_speed == SPEED_2500)
1288                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1289                 else if (bp->req_line_speed == SPEED_1000)
1290                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1291                 else if (bp->req_line_speed == SPEED_100) {
1292                         if (bp->req_duplex == DUPLEX_FULL)
1293                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1294                         else
1295                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296                 } else if (bp->req_line_speed == SPEED_10) {
1297                         if (bp->req_duplex == DUPLEX_FULL)
1298                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1299                         else
1300                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1301                 }
1302         }
1303
1304         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1305                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1306         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1307                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1308
1309         if (port == PORT_TP)
1310                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1311                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1312
1313         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1314
1315         spin_unlock_bh(&bp->phy_lock);
1316         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1317         spin_lock_bh(&bp->phy_lock);
1318
1319         return 0;
1320 }
1321
/* Configure a SerDes PHY according to bp->autoneg / req_line_speed /
 * req_duplex, handing off to the firmware when the PHY is remote.
 * Caller holds phy_lock (dropped around the forced-link-down sleep).
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Match the PHY's 2.5G capability bit to the requested
		 * speed; if it changed, the link must be retrained. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is the BMCR speed-select MSB
				 * (BMCR_SPEED100); clear it so only
				 * BMCR_SPEED1000 selects the speed. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees link drop. */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just re-resolve flow control
			 * and refresh the MAC configuration. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep may sleep; drop phy_lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1436
/* All fibre speeds ethtool may advertise; includes 2.5G only when the PHY
 * is 2.5G capable.  NOTE: expands to an expression that reads 'bp', so it
 * can only be used where a 'struct bnx2 *bp' is in scope. */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All copper speeds ethtool may advertise. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 speeds plus
 * ADVERTISE_CSMA (the 802.3 selector field). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1G duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1451
/* Initialize autoneg/speed/duplex defaults from the link settings the
 * firmware (remote PHY) stored in shared memory for the current port type.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	/* Each port type has its own default-link word in shared memory. */
	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: translate firmware speed bits into the ethtool
		 * ADVERTISED_* mask. */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced speed: the highest speed bit set wins; half duplex
		 * is only possible for 10/100. */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1498
1499 static void
1500 bnx2_set_default_link(struct bnx2 *bp)
1501 {
1502         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1503                 return bnx2_set_default_remote_link(bp);
1504
1505         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1506         bp->req_line_speed = 0;
1507         if (bp->phy_flags & PHY_SERDES_FLAG) {
1508                 u32 reg;
1509
1510                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1511
1512                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1513                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1514                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1515                         bp->autoneg = 0;
1516                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1517                         bp->req_duplex = DUPLEX_FULL;
1518                 }
1519         } else
1520                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1521 }
1522
1523 static void
1524 bnx2_send_heart_beat(struct bnx2 *bp)
1525 {
1526         u32 msg;
1527         u32 addr;
1528
1529         spin_lock(&bp->indirect_lock);
1530         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1531         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1532         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1533         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1534         spin_unlock(&bp->indirect_lock);
1535 }
1536
/* Handle a link event reported by the firmware-managed (remote) PHY:
 * decode the LINK_STATUS shared-memory word into software link state,
 * resolve flow control, and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware wants a heart beat; answer before decoding the rest. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		/* Assume full duplex; the HALF cases below override it and
		 * then fall through to set the matching line speed. */
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control not autonegotiated: honor the
			 * requested setting (full duplex only). */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			/* Take the negotiated result from the firmware. */
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the firmware switched port type (SerDes vs. copper),
		 * re-derive the default link configuration. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1615
1616 static int
1617 bnx2_set_remote_link(struct bnx2 *bp)
1618 {
1619         u32 evt_code;
1620
1621         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1622         switch (evt_code) {
1623                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1624                         bnx2_remote_phy_event(bp);
1625                         break;
1626                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1627                 default:
1628                         bnx2_send_heart_beat(bp);
1629                         break;
1630         }
1631         return 0;
1632 }
1633
/* Program a copper PHY according to bp->autoneg and the bp->req_*
 * settings: either (re)start autonegotiation with the advertisement
 * built from bp->advertising, or force speed/duplex via BMCR.
 *
 * NOTE(review): the spin_unlock_bh/spin_lock_bh pair around msleep()
 * below implies the caller holds bp->phy_lock on entry — confirm at
 * call sites.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement, keeping only the speed
		 * and pause bits for comparison below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current gigabit advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only touch the PHY if the advertisement changed or
		 * autoneg is not currently enabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced mode: build BMCR from the requested speed/duplex. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		/* BMCR already matches; just refresh flow control/MAC. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1730
1731 static int
1732 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1733 {
1734         if (bp->loopback == MAC_LOOPBACK)
1735                 return 0;
1736
1737         if (bp->phy_flags & PHY_SERDES_FLAG) {
1738                 return (bnx2_setup_serdes_phy(bp, port));
1739         }
1740         else {
1741                 return (bnx2_setup_copper_phy(bp));
1742         }
1743 }
1744
/* Initialize the 5709 SerDes PHY.
 *
 * On this PHY the standard MII registers live at offset +0x10; the
 * bp->mii_* fields are remapped accordingly so the generic code keeps
 * working.  Register banks are selected by writing MII_BNX2_BLK_ADDR
 * before each group of accesses.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Remap the standard MII register offsets for this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AN MMD through the address-expansion register. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode; disable auto media detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the hardware supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable clause-73 BAM autoneg features. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected on exit. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1793
/* Initialize the 5708 SerDes PHY: reset, select fiber mode with auto
 * detection, optionally enable 2.5G, and apply chip-rev / backplane
 * TX amplitude workarounds from shared-memory config.  Always
 * returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE-compliant signalling (DIG3 block). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the hardware is capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM config,
	 * but only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1851
/* Initialize the 5706 SerDes PHY.
 *
 * The 0x18/0x1c accesses are vendor-specific (Broadcom shadow/expansion
 * registers); the magic values enable extended packet length for
 * jumbo MTUs and restore defaults otherwise.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1888
/* Initialize a copper PHY: reset, apply errata workarounds selected by
 * phy_flags, configure extended packet length for jumbo MTUs, and
 * enable ethernet@wirespeed.  The 0x15/0x17/0x18 writes are Broadcom
 * shadow-register sequences (magic values per flag name).  Always
 * returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC errata workaround sequence. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clears bit 8 of DSP expansion reg 8). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1939
1940
/* Common PHY init entry point.
 *
 * Sets the default MII register map and link-attention mode, reads the
 * PHY ID, dispatches to the chip-specific init routine (skipped when a
 * remote PHY owns the link), then runs bnx2_setup_phy().  Returns 0 or
 * a negative error from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default (clause-22) MII register offsets; chip-specific init
	 * below may remap them.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Firmware manages the PHY; skip local PHY init. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1984
1985 static int
1986 bnx2_set_mac_loopback(struct bnx2 *bp)
1987 {
1988         u32 mac_mode;
1989
1990         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1992         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1993         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1994         bp->link_up = 1;
1995         return 0;
1996 }
1997
1998 static int bnx2_test_link(struct bnx2 *);
1999
/* Put the PHY into loopback at forced 1000/full, wait up to ~1s for
 * the link test to pass, then configure the EMAC for GMII with all
 * loopback/force bits cleared.  Returns 0 on success or the PHY write
 * error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; give up silently after 10 x 100ms. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC-level loopback/force bits; the loopback now lives
	 * in the PHY.
	 */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2029
/* Synchronous driver<->firmware mailbox handshake.
 *
 * Tags msg_data with an incrementing sequence number, writes it to the
 * DRV mailbox, and polls the FW mailbox for a matching ACK for up to
 * FW_ACK_TIME_OUT_MS.  WAIT0 messages return 0 without checking the
 * status.  On timeout the firmware is notified (unless @silent) and
 * -EBUSY is returned; a non-OK firmware status yields -EIO.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Each message carries a fresh sequence number so the ACK can
	 * be matched to this request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2072
/* Initialize the 5709 host-based context memory.
 *
 * Kicks off the on-chip context memory init, waits for it to finish,
 * then programs each host context page into the chip's page table,
 * polling for each write request to complete.  Returns 0 on success,
 * -EBUSY if the init or a page-table write times out.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size relative to the chip's 256-byte
	 * base page.
	 */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page DMA address plus valid bit,
		 * then the high 32 bits, then trigger the write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to be accepted. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2115
/* Zero-initialize the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips).  5706 A0 needs a remapped physical CID for part of
 * the range to work around a chip erratum.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 erratum: remap CIDs with bit 3 set to
			 * an alternate physical CID.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans several physical pages; map and clear
		 * each one.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2158
/* Work around bad RX buffer memory blocks (5706 A0 — inferred from the
 * single call-site convention in this driver; TODO confirm).
 *
 * Allocates every free mbuf from the chip's RX buffer pool, records
 * the good ones (bit 9 clear), and frees only those back — leaving the
 * bad blocks permanently allocated so the chip never uses them.
 * Returns 0 or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Up to 512 mbuf handles can be outstanding. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the handle in the format the free register
		 * expects.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2209
2210 static void
2211 bnx2_set_mac_addr(struct bnx2 *bp)
2212 {
2213         u32 val;
2214         u8 *mac_addr = bp->dev->dev_addr;
2215
2216         val = (mac_addr[0] << 8) | mac_addr[1];
2217
2218         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2219
2220         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2221                 (mac_addr[4] << 8) | mac_addr[5];
2222
2223         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2224 }
2225
2226 static inline int
2227 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2228 {
2229         dma_addr_t mapping;
2230         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2231         struct rx_bd *rxbd =
2232                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2233         struct page *page = alloc_page(GFP_ATOMIC);
2234
2235         if (!page)
2236                 return -ENOMEM;
2237         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2238                                PCI_DMA_FROMDEVICE);
2239         rx_pg->page = page;
2240         pci_unmap_addr_set(rx_pg, mapping, mapping);
2241         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2242         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2243         return 0;
2244 }
2245
2246 static void
2247 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2248 {
2249         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2250         struct page *page = rx_pg->page;
2251
2252         if (!page)
2253                 return;
2254
2255         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2256                        PCI_DMA_FROMDEVICE);
2257
2258         __free_page(page);
2259         rx_pg->page = NULL;
2260 }
2261
/* Allocate, align, and DMA-map an skb for RX ring slot @index, fill in
 * its buffer descriptor, and advance rx_prod_bseq.  Returns 0 or
 * -ENOMEM.
 *
 * NOTE(review): the pci_map_single() result is used unchecked —
 * consider a pci_dma_mapping_error() check as done elsewhere.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Account the new buffer in the producer byte sequence. */
	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2292
2293 static int
2294 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2295 {
2296         struct status_block *sblk = bp->status_blk;
2297         u32 new_link_state, old_link_state;
2298         int is_set = 1;
2299
2300         new_link_state = sblk->status_attn_bits & event;
2301         old_link_state = sblk->status_attn_bits_ack & event;
2302         if (new_link_state != old_link_state) {
2303                 if (new_link_state)
2304                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2305                 else
2306                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2307         } else
2308                 is_set = 0;
2309
2310         return is_set;
2311 }
2312
/* Service PHY-related attention bits from the status block: resolve a
 * link-state change under the PHY lock, then process any pending
 * remote-PHY (timer-abort) event.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	/* Timer-abort attention signals a firmware event in shmem. */
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2325
/* Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the software consumer to the hardware
 * consumer reported in the status block, unmapping and freeing each
 * completed skb, then wakes the TX queue if it was stopped and enough
 * descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The hardware never reports the index of the last BD in a
	 * ring page (it's a chain pointer); skip past it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's final BD; if the packet
			 * wraps a ring page, account for the skipped
			 * chain BD.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD hasn't completed
			 * yet (signed 16-bit wrap-safe comparison).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each fragment. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware consumer — more completions may
		 * have arrived while we worked.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race with
	 * bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2413
/* Recycle @count RX page-ring entries from the consumer back to the
 * producer side without allocating new pages.
 *
 * If @skb is non-NULL, its last page fragment is detached, remapped,
 * and placed back in the consumer slot before recycling, and the skb
 * is freed (used on error paths where the skb cannot be passed up).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bp->rx_pg_prod, prod;
	u16 cons = bp->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Steal the skb's last page frag back into the
			 * consumer slot, then free the skb.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move page, mapping, and descriptor address
			 * from the consumer slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bp->rx_pg_prod = hw_prod;
	bp->rx_pg_cons = cons;
}
2462
/* Recycle an RX skb from consumer slot @cons to producer slot @prod
 * without reallocating: sync the buffer back to the device, account it
 * in rx_prod_bseq, and (when the slots differ) move the DMA mapping
 * and descriptor address over.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the buffer head (header + copy-threshold bytes) back to
	 * the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2492
/* Finish receiving a packet that is not copied: replenish the rx ring
 * slot, unmap the buffer, and for split-header/jumbo frames attach the
 * page-ring fragments to the skb.
 *
 * @len:      packet length with the 4-byte trailing CRC already
 *            subtracted by the caller (it is re-added internally)
 * @hdr_len:  bytes in the linear part when the frame is split across
 *            the page ring; 0 for a fully linear frame
 * @ring_idx: consumer index in the high 16 bits, producer index in the
 *            low 16 bits
 *
 * Returns 0 on success; on allocation failure the buffer (and any page
 * fragments) are recycled back to the hardware and the error is
 * returned so the caller drops the packet.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Put a fresh skb on the ring slot we are consuming. */
	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		/* No memory: recycle this skb and any page fragments
		 * that belong to the frame.
		 */
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire frame fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bp->rx_pg_cons;
		u16 pg_prod = bp->rx_pg_prod;

		/* Bytes remaining in the page ring, CRC included. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* This fragment holds only (part of) the
				 * CRC; recycle the remaining pages and
				 * trim the CRC bytes already counted in
				 * the skb.
				 */
				unsigned int tail = 4 - frag_len;

				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Last fragment: exclude the 4-byte CRC. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Refill the page ring slot we just consumed. */
			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				/* Recycles the skb's pages as well. */
				bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bp->rx_pg_prod = pg_prod;
		bp->rx_pg_cons = pg_cons;
	}
	return 0;
}
2581
2582 static inline u16
2583 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2584 {
2585         u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2586
2587         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2588                 cons++;
2589         return cons;
2590 }
2591
/* NAPI receive handler: process up to @budget completed rx descriptors
 * and return the number of packets passed up the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bp);
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area; the l2_fhdr sits at the
		 * start of the buffer.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Drop frames the hardware flagged as bad; recycle the
		 * buffer back to the ring.
		 */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}
		/* Split-header or jumbo frames continue in the page
		 * ring; remember to update its mailbox below.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the trailing 4-byte frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small packet: copy into a fresh skb and
			 * recycle the original buffer.
			 */
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
				    (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless 802.1Q tagged (0x8100),
		 * which are 4 bytes longer.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bp);
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new host producer indices. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bp->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2736
2737 /* MSI ISR - The only difference between this and the INTx ISR
2738  * is that the MSI interrupt is always serviced.
2739  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Ack and mask further interrupts; NAPI completion re-enables
	 * them via BNX2_PCICFG_INT_ACK_CMD in bnx2_poll().
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2759
/* One-shot MSI ISR: unlike bnx2_msi(), no ack/mask register write is
 * done here before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2776
/* INTx (possibly shared) interrupt handler.  Must detect whether this
 * device actually raised the interrupt before claiming it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Only record last_status_idx if we actually get to schedule
	 * NAPI; netif_rx_schedule_prep() fails when a poll is already
	 * pending.
	 */
	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
2815
2816 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2817                                  STATUS_ATTN_BITS_TIMER_ABORT)
2818
2819 static inline int
2820 bnx2_has_work(struct bnx2 *bp)
2821 {
2822         struct status_block *sblk = bp->status_blk;
2823
2824         if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2825             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2826                 return 1;
2827
2828         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2829             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2830                 return 1;
2831
2832         return 0;
2833 }
2834
/* One pass of NAPI work: handle attention (link) events, reap tx
 * completions, and receive up to the remaining budget.  Returns the
 * updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention event is pending when the bits and their acks
	 * disagree.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
		work_done += bnx2_rx_int(bp, budget - work_done);

	return work_done;
}
2862
/* NAPI poll callback: loop until the budget is exhausted or no work
 * remains, then complete NAPI and re-enable interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	int work_done = 0;
	struct status_block *sblk = bp->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bp->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bp->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bp))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI: a single unmasking write re-arms the
			 * interrupt.
			 */
			if (likely(bp->flags & USING_MSI_FLAG)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bp->last_status_idx);
				break;
			}
			/* INTx: write the index twice -- first with
			 * interrupts still masked, then unmasked to
			 * re-arm the IRQ.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bp->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			break;
		}
	}

	return work_done;
}
2903
2904 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2905  * from set_multicast.
2906  */
2907 static void
2908 bnx2_set_rx_mode(struct net_device *dev)
2909 {
2910         struct bnx2 *bp = netdev_priv(dev);
2911         u32 rx_mode, sort_mode;
2912         int i;
2913
2914         spin_lock_bh(&bp->phy_lock);
2915
2916         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2917                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2918         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2919 #ifdef BCM_VLAN
2920         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2921                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2922 #else
2923         if (!(bp->flags & ASF_ENABLE_FLAG))
2924                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2925 #endif
2926         if (dev->flags & IFF_PROMISC) {
2927                 /* Promiscuous mode. */
2928                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2929                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2930                              BNX2_RPM_SORT_USER0_PROM_VLAN;
2931         }
2932         else if (dev->flags & IFF_ALLMULTI) {
2933                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2934                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2935                                0xffffffff);
2936                 }
2937                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2938         }
2939         else {
2940                 /* Accept one or more multicast(s). */
2941                 struct dev_mc_list *mclist;
2942                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2943                 u32 regidx;
2944                 u32 bit;
2945                 u32 crc;
2946
2947                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2948
2949                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2950                      i++, mclist = mclist->next) {
2951
2952                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2953                         bit = crc & 0xff;
2954                         regidx = (bit & 0xe0) >> 5;
2955                         bit &= 0x1f;
2956                         mc_filter[regidx] |= (1 << bit);
2957                 }
2958
2959                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2960                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2961                                mc_filter[i]);
2962                 }
2963
2964                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2965         }
2966
2967         if (rx_mode != bp->rx_mode) {
2968                 bp->rx_mode = rx_mode;
2969                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2970         }
2971
2972         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2973         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2974         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2975
2976         spin_unlock_bh(&bp->phy_lock);
2977 }
2978
2979 static void
2980 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2981         u32 rv2p_proc)
2982 {
2983         int i;
2984         u32 val;
2985
2986
2987         for (i = 0; i < rv2p_code_len; i += 8) {
2988                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2989                 rv2p_code++;
2990                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2991                 rv2p_code++;
2992
2993                 if (rv2p_proc == RV2P_PROC1) {
2994                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2995                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2996                 }
2997                 else {
2998                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2999                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3000                 }
3001         }
3002
3003         /* Reset the processor, un-stall is done later. */
3004         if (rv2p_proc == RV2P_PROC1) {
3005                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3006         }
3007         else {
3008                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3009         }
3010 }
3011
/* Load one on-chip CPU's firmware sections (text, data, sbss, bss,
 * rodata) into its scratchpad via indirect register writes, then start
 * the CPU at fw->start_addr.
 *
 * Returns 0 on success, or a negative value if inflating the
 * compressed text section fails.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* The text section is stored gzip-compressed; inflate
		 * it into the caller-supplied fw->text buffer first.
		 */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		/* NOTE(review): text words go through cpu_to_le32() but
		 * the data/rodata sections below are written raw --
		 * confirm this asymmetry is intentional.
		 */
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU: clear state, then release the halt bit. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
3093
/* Load firmware into all on-chip processors (RV2P proc1/proc2, RXP,
 * TXP, TPAT, COM, CP), selecting the 5709 or 5706/5708 images by chip.
 *
 * A single FW_BUF_SIZE vmalloc'd scratch buffer is shared: it holds
 * the inflated RV2P code, then is assigned to fw->text as the inflate
 * target for each CPU image loaded by load_cpu_fw().
 *
 * Returns 0 on success or a negative error (ENOMEM or an inflate/load
 * failure).
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);

init_cpu_err:
	vfree(text);
	return rc;
}
3254
/* Transition the device between PCI power states.
 *
 * D0: clears PME status and restores normal (non-wake) EMAC/RPM modes.
 * D3hot: optionally arms Wake-on-LAN (magic packet), notifies firmware,
 * then writes the new state to PMCSR.  Only D0 and D3hot are supported;
 * any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Force the state field to D0 and clear any pending PME
		 * status (PME_STATUS is written back as-is to clear it).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Turn off magic-packet mode for normal operation.
		 * NOTE(review): MPKT_RCVD/ACPI_RCVD presumably ack the
		 * wake-packet-received status -- confirm against the
		 * register spec.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* For copper, advertise only 10/100 while the
			 * wake link is set up; the user settings are
			 * restored below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			/* Restore the user's link settings. */
			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule 0: accept broadcast and multicast.
			 * Clear, program, then enable the rule.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell firmware we are suspending, unless WOL is entirely
		 * unsupported on this board.
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 3 == D3hot in the PMCSR state field.  On 5706 A0/A1 the
		 * state is only changed when WOL is armed; otherwise the
		 * chip is left in D0.  NOTE(review): presumably an A0/A1
		 * errata workaround -- confirm.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3391
3392 static int
3393 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3394 {
3395         u32 val;
3396         int j;
3397
3398         /* Request access to the flash interface. */
3399         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3400         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3401                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3402                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3403                         break;
3404
3405                 udelay(5);
3406         }
3407
3408         if (j >= NVRAM_TIMEOUT_COUNT)
3409                 return -EBUSY;
3410
3411         return 0;
3412 }
3413
3414 static int
3415 bnx2_release_nvram_lock(struct bnx2 *bp)
3416 {
3417         int j;
3418         u32 val;
3419
3420         /* Relinquish nvram interface. */
3421         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3422
3423         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3424                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3425                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3426                         break;
3427
3428                 udelay(5);
3429         }
3430
3431         if (j >= NVRAM_TIMEOUT_COUNT)
3432                 return -EBUSY;
3433
3434         return 0;
3435 }
3436
3437
3438 static int
3439 bnx2_enable_nvram_write(struct bnx2 *bp)
3440 {
3441         u32 val;
3442
3443         val = REG_RD(bp, BNX2_MISC_CFG);
3444         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3445
3446         if (bp->flash_info->flags & BNX2_NV_WREN) {
3447                 int j;
3448
3449                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3450                 REG_WR(bp, BNX2_NVM_COMMAND,
3451                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3452
3453                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3454                         udelay(5);
3455
3456                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3457                         if (val & BNX2_NVM_COMMAND_DONE)
3458                                 break;
3459                 }
3460
3461                 if (j >= NVRAM_TIMEOUT_COUNT)
3462                         return -EBUSY;
3463         }
3464         return 0;
3465 }
3466
3467 static void
3468 bnx2_disable_nvram_write(struct bnx2 *bp)
3469 {
3470         u32 val;
3471
3472         val = REG_RD(bp, BNX2_MISC_CFG);
3473         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3474 }
3475
3476
3477 static void
3478 bnx2_enable_nvram_access(struct bnx2 *bp)
3479 {
3480         u32 val;
3481
3482         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3483         /* Enable both bits, even on read. */
3484         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3485                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3486 }
3487
3488 static void
3489 bnx2_disable_nvram_access(struct bnx2 *bp)
3490 {
3491         u32 val;
3492
3493         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3494         /* Disable both bits, even after read. */
3495         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3496                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3497                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3498 }
3499
3500 static int
3501 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3502 {
3503         u32 cmd;
3504         int j;
3505
3506         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3507                 /* Buffered flash, no erase needed */
3508                 return 0;
3509
3510         /* Build an erase command */
3511         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3512               BNX2_NVM_COMMAND_DOIT;
3513
3514         /* Need to clear DONE bit separately. */
3515         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3516
3517         /* Address of the NVRAM to read from. */
3518         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3519
3520         /* Issue an erase command. */
3521         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3522
3523         /* Wait for completion. */
3524         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3525                 u32 val;
3526
3527                 udelay(5);
3528
3529                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3530                 if (val & BNX2_NVM_COMMAND_DONE)
3531                         break;
3532         }
3533
3534         if (j >= NVRAM_TIMEOUT_COUNT)
3535                 return -EBUSY;
3536
3537         return 0;
3538 }
3539
/* Read one 32-bit word from NVRAM at offset into ret_val[0..3].
 *
 * cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST to bracket a multi-word
 * transaction.  Returns 0 on success, -EBUSY if the controller does not
 * signal DONE within the timeout.  The caller holds the NVRAM lock and
 * has access enabled (see bnx2_nvram_read()).
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Store the word so ret_val[] holds the bytes in
			 * big-endian (flash) order on any host.
			 */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3585
3586
/* Write one 32-bit word (val[0..3], flash byte order) to NVRAM at offset.
 *
 * cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST to bracket a multi-word
 * transaction.  Returns 0 on success, -EBUSY on completion timeout.
 * The caller holds the NVRAM lock with access and write enabled (see
 * bnx2_nvram_write()).
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Present the caller's bytes to the controller in big-endian
	 * (flash) order regardless of host endianness.
	 */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3630
/* Identify the attached flash/EEPROM part and cache its parameters.
 *
 * On 5709 the layout is fixed (flash_5709).  On older chips the part
 * is matched against flash_table using strapping bits from NVM_CFG1,
 * and the flash interface is reprogrammed for the matched part if
 * firmware has not already reconfigured it.  Also determines flash
 * size.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* Single known layout; skip the table scan (and its
		 * j == entry_count check) entirely.
		 */
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping field to match on. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched the strapping: unsupported part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared hw config; fall back to the
	 * table entry's total_size when firmware reports none.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3713
3714 static int
3715 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3716                 int buf_size)
3717 {
3718         int rc = 0;
3719         u32 cmd_flags, offset32, len32, extra;
3720
3721         if (buf_size == 0)
3722                 return 0;
3723
3724         /* Request access to the flash interface. */
3725         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3726                 return rc;
3727
3728         /* Enable access to flash interface */
3729         bnx2_enable_nvram_access(bp);
3730
3731         len32 = buf_size;
3732         offset32 = offset;
3733         extra = 0;
3734
3735         cmd_flags = 0;
3736
3737         if (offset32 & 3) {
3738                 u8 buf[4];
3739                 u32 pre_len;
3740
3741                 offset32 &= ~3;
3742                 pre_len = 4 - (offset & 3);
3743
3744                 if (pre_len >= len32) {
3745                         pre_len = len32;
3746                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3747                                     BNX2_NVM_COMMAND_LAST;
3748                 }
3749                 else {
3750                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3751                 }
3752
3753                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3754
3755                 if (rc)
3756                         return rc;
3757
3758                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3759
3760                 offset32 += 4;
3761                 ret_buf += pre_len;
3762                 len32 -= pre_len;
3763         }
3764         if (len32 & 3) {
3765                 extra = 4 - (len32 & 3);
3766                 len32 = (len32 + 4) & ~3;
3767         }
3768
3769         if (len32 == 4) {
3770                 u8 buf[4];
3771
3772                 if (cmd_flags)
3773                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3774                 else
3775                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3776                                     BNX2_NVM_COMMAND_LAST;
3777
3778                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3779
3780                 memcpy(ret_buf, buf, 4 - extra);
3781         }
3782         else if (len32 > 0) {
3783                 u8 buf[4];
3784
3785                 /* Read the first word. */
3786                 if (cmd_flags)
3787                         cmd_flags = 0;
3788                 else
3789                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3790
3791                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3792
3793                 /* Advance to the next dword. */
3794                 offset32 += 4;
3795                 ret_buf += 4;
3796                 len32 -= 4;
3797
3798                 while (len32 > 4 && rc == 0) {
3799                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3800
3801                         /* Advance to the next dword. */
3802                         offset32 += 4;
3803                         ret_buf += 4;
3804                         len32 -= 4;
3805                 }
3806
3807                 if (rc)
3808                         return rc;
3809
3810                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3811                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3812
3813                 memcpy(ret_buf, buf, 4 - extra);
3814         }
3815
3816         /* Disable access to flash interface */
3817         bnx2_disable_nvram_access(bp);
3818
3819         bnx2_release_nvram_lock(bp);
3820
3821         return rc;
3822 }
3823
3824 static int
3825 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3826                 int buf_size)
3827 {
3828         u32 written, offset32, len32;
3829         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3830         int rc = 0;
3831         int align_start, align_end;
3832
3833         buf = data_buf;
3834         offset32 = offset;
3835         len32 = buf_size;
3836         align_start = align_end = 0;
3837
3838         if ((align_start = (offset32 & 3))) {
3839                 offset32 &= ~3;
3840                 len32 += align_start;
3841                 if (len32 < 4)
3842                         len32 = 4;
3843                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3844                         return rc;
3845         }
3846
3847         if (len32 & 3) {
3848                 align_end = 4 - (len32 & 3);
3849                 len32 += align_end;
3850                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3851                         return rc;
3852         }
3853
3854         if (align_start || align_end) {
3855                 align_buf = kmalloc(len32, GFP_KERNEL);
3856                 if (align_buf == NULL)
3857                         return -ENOMEM;
3858                 if (align_start) {
3859                         memcpy(align_buf, start, 4);
3860                 }
3861                 if (align_end) {
3862                         memcpy(align_buf + len32 - 4, end, 4);
3863                 }
3864                 memcpy(align_buf + align_start, data_buf, buf_size);
3865                 buf = align_buf;
3866         }
3867
3868         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3869                 flash_buffer = kmalloc(264, GFP_KERNEL);
3870                 if (flash_buffer == NULL) {
3871                         rc = -ENOMEM;
3872                         goto nvram_write_end;
3873                 }
3874         }
3875
3876         written = 0;
3877         while ((written < len32) && (rc == 0)) {
3878                 u32 page_start, page_end, data_start, data_end;
3879                 u32 addr, cmd_flags;
3880                 int i;
3881
3882                 /* Find the page_start addr */
3883                 page_start = offset32 + written;
3884                 page_start -= (page_start % bp->flash_info->page_size);
3885                 /* Find the page_end addr */
3886                 page_end = page_start + bp->flash_info->page_size;
3887                 /* Find the data_start addr */
3888                 data_start = (written == 0) ? offset32 : page_start;
3889                 /* Find the data_end addr */
3890                 data_end = (page_end > offset32 + len32) ?
3891                         (offset32 + len32) : page_end;
3892
3893                 /* Request access to the flash interface. */
3894                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3895                         goto nvram_write_end;
3896
3897                 /* Enable access to flash interface */
3898                 bnx2_enable_nvram_access(bp);
3899
3900                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3901                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3902                         int j;
3903
3904                         /* Read the whole page into the buffer
3905                          * (non-buffer flash only) */
3906                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3907                                 if (j == (bp->flash_info->page_size - 4)) {
3908                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3909                                 }
3910                                 rc = bnx2_nvram_read_dword(bp,
3911                                         page_start + j,
3912                                         &flash_buffer[j],
3913                                         cmd_flags);
3914
3915                                 if (rc)
3916                                         goto nvram_write_end;
3917
3918                                 cmd_flags = 0;
3919                         }
3920                 }
3921
3922                 /* Enable writes to flash interface (unlock write-protect) */
3923                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3924                         goto nvram_write_end;
3925
3926                 /* Loop to write back the buffer data from page_start to
3927                  * data_start */
3928                 i = 0;
3929                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3930                         /* Erase the page */
3931                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3932                                 goto nvram_write_end;
3933
3934                         /* Re-enable the write again for the actual write */
3935                         bnx2_enable_nvram_write(bp);
3936
3937                         for (addr = page_start; addr < data_start;
3938                                 addr += 4, i += 4) {
3939
3940                                 rc = bnx2_nvram_write_dword(bp, addr,
3941                                         &flash_buffer[i], cmd_flags);
3942
3943                                 if (rc != 0)
3944                                         goto nvram_write_end;
3945
3946                                 cmd_flags = 0;
3947                         }
3948                 }
3949
3950                 /* Loop to write the new data from data_start to data_end */
3951                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3952                         if ((addr == page_end - 4) ||
3953                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3954                                  (addr == data_end - 4))) {
3955
3956                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3957                         }
3958                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3959                                 cmd_flags);
3960
3961                         if (rc != 0)
3962                                 goto nvram_write_end;
3963
3964                         cmd_flags = 0;
3965                         buf += 4;
3966                 }
3967
3968                 /* Loop to write back the buffer data from data_end
3969                  * to page_end */
3970                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3971                         for (addr = data_end; addr < page_end;
3972                                 addr += 4, i += 4) {
3973
3974                                 if (addr == page_end-4) {
3975                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3976                                 }
3977                                 rc = bnx2_nvram_write_dword(bp, addr,
3978                                         &flash_buffer[i], cmd_flags);
3979
3980                                 if (rc != 0)
3981                                         goto nvram_write_end;
3982
3983                                 cmd_flags = 0;
3984                         }
3985                 }
3986
3987                 /* Disable writes to flash interface (lock write-protect) */
3988                 bnx2_disable_nvram_write(bp);
3989
3990                 /* Disable access to flash interface */
3991                 bnx2_disable_nvram_access(bp);
3992                 bnx2_release_nvram_lock(bp);
3993
3994                 /* Increment written */
3995                 written += data_end - data_start;
3996         }
3997
3998 nvram_write_end:
3999         kfree(flash_buffer);
4000         kfree(align_buf);
4001         return rc;
4002 }
4003
4004 static void
4005 bnx2_init_remote_phy(struct bnx2 *bp)
4006 {
4007         u32 val;
4008
4009         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4010         if (!(bp->phy_flags & PHY_SERDES_FLAG))
4011                 return;
4012
4013         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4014         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4015                 return;
4016
4017         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4018                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4019
4020                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4021                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4022                         bp->phy_port = PORT_FIBRE;
4023                 else
4024                         bp->phy_port = PORT_TP;
4025
4026                 if (netif_running(bp->dev)) {
4027                         u32 sig;
4028
4029                         if (val & BNX2_LINK_STATUS_LINK_UP) {
4030                                 bp->link_up = 1;
4031                                 netif_carrier_on(bp->dev);
4032                         } else {
4033                                 bp->link_up = 0;
4034                                 netif_carrier_off(bp->dev);
4035                         }
4036                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4037                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4038                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4039                                    sig);
4040                 }
4041         }
4042 }
4043
4044 static int
4045 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4046 {
4047         u32 val;
4048         int i, rc = 0;
4049         u8 old_port;
4050
4051         /* Wait for the current PCI transaction to complete before
4052          * issuing a reset. */
4053         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4054                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4055                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4056                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4057                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4058         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4059         udelay(5);
4060
4061         /* Wait for the firmware to tell us it is ok to issue a reset. */
4062         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4063
4064         /* Deposit a driver reset signature so the firmware knows that
4065          * this is a soft reset. */
4066         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4067                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
4068
4069         /* Do a dummy read to force the chip to complete all current transaction
4070          * before we issue a reset. */
4071         val = REG_RD(bp, BNX2_MISC_ID);
4072
4073         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4074                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4075                 REG_RD(bp, BNX2_MISC_COMMAND);
4076                 udelay(5);
4077
4078                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4079                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4080
4081                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4082
4083         } else {
4084                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4085                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4086                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4087
4088                 /* Chip reset. */
4089                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4090
4091                 /* Reading back any register after chip reset will hang the
4092                  * bus on 5706 A0 and A1.  The msleep below provides plenty
4093                  * of margin for write posting.
4094                  */
4095                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4096                     (CHIP_ID(bp) == CHIP_ID_5706_A1))
4097                         msleep(20);
4098
4099                 /* Reset takes approximate 30 usec */
4100                 for (i = 0; i < 10; i++) {
4101                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4102                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4103                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4104                                 break;
4105                         udelay(10);
4106                 }
4107
4108                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4109                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4110                         printk(KERN_ERR PFX "Chip reset did not complete\n");
4111                         return -EBUSY;
4112                 }
4113         }
4114
4115         /* Make sure byte swapping is properly configured. */
4116         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4117         if (val != 0x01020304) {
4118                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4119                 return -ENODEV;
4120         }
4121
4122         /* Wait for the firmware to finish its initialization. */
4123         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4124         if (rc)
4125                 return rc;
4126
4127         spin_lock_bh(&bp->phy_lock);
4128         old_port = bp->phy_port;
4129         bnx2_init_remote_phy(bp);
4130         if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4131                 bnx2_set_default_remote_link(bp);
4132         spin_unlock_bh(&bp->phy_lock);
4133
4134         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4135         /* Adjust the voltage regulator to two steps lower.  The default
4136                  * of this register is 0x0000000e. */
4137                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4138
4139                 /* Remove bad rbuf memory from the free pool. */
4140                 rc = bnx2_alloc_bad_rbuf(bp);
4141         }
4142
4143         return rc;
4144 }
4145
/* One-time hardware initialization after chip reset: program the DMA
 * engine, context memory, on-chip CPUs, MAC address, MTU and the host
 * coalescing (HC) block, then tell the firmware initialization is done.
 * Returns 0 on success or a negative errno from firmware/CPU init.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swap mode plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* 5706 (except A0) on non-PCIX buses needs ping-pong DMA mode. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the relaxed-ordering enable bit in the PCI-X
		 * command register. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround bit -- see chip errata. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff state machine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Hand the HC block the DMA addresses of the status and
	 * statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Coalescing thresholds: high 16 bits are used while interrupts
	 * are disabled, low 16 bits while they are enabled. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer modes. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that reset/initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command value for fast coalesce-now pokes later. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4323
4324 static void
4325 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4326 {
4327         u32 val, offset0, offset1, offset2, offset3;
4328
4329         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4330                 offset0 = BNX2_L2CTX_TYPE_XI;
4331                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4332                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4333                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4334         } else {
4335                 offset0 = BNX2_L2CTX_TYPE;
4336                 offset1 = BNX2_L2CTX_CMD_TYPE;
4337                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4338                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4339         }
4340         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4341         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4342
4343         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4344         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4345
4346         val = (u64) bp->tx_desc_mapping >> 32;
4347         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4348
4349         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4350         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4351 }
4352
4353 static void
4354 bnx2_init_tx_ring(struct bnx2 *bp)
4355 {
4356         struct tx_bd *txbd;
4357         u32 cid;
4358
4359         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4360
4361         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4362
4363         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4364         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4365
4366         bp->tx_prod = 0;
4367         bp->tx_cons = 0;
4368         bp->hw_tx_cons = 0;
4369         bp->tx_prod_bseq = 0;
4370
4371         cid = TX_CID;
4372         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4373         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4374
4375         bnx2_init_tx_context(bp, cid);
4376 }
4377
4378 static void
4379 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4380                      int num_rings)
4381 {
4382         int i;
4383         struct rx_bd *rxbd;
4384
4385         for (i = 0; i < num_rings; i++) {
4386                 int j;
4387
4388                 rxbd = &rx_ring[i][0];
4389                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4390                         rxbd->rx_bd_len = buf_size;
4391                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4392                 }
4393                 if (i == (num_rings - 1))
4394                         j = 0;
4395                 else
4396                         j = i + 1;
4397                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4398                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4399         }
4400 }
4401
/* Set up the normal RX ring and, for jumbo MTUs, the RX page (S/G) ring:
 * chain the BD pages, program the ring contexts, pre-fill both rings
 * with buffers, and publish the producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

	bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->rx_prod_bseq = 0;
	bp->rx_pg_prod = 0;
	bp->rx_pg_cons = 0;

	/* Chain the normal RX BD ring pages together. */
	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo S/G mode: a second ring of page-sized buffers
		 * holds frame data beyond the header buffer. */
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Program the RX context type and the ring base address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate pages for the page ring; stop early on failure. */
	ring_prod = prod = bp->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bp->rx_pg_prod = prod;

	/* Pre-allocate skbs for the normal RX ring. */
	ring_prod = prod = bp->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish producer indices and the byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4473
4474 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4475 {
4476         u32 max, num_rings = 1;
4477
4478         while (ring_size > MAX_RX_DESC_CNT) {
4479                 ring_size -= MAX_RX_DESC_CNT;
4480                 num_rings++;
4481         }
4482         /* round to next power of 2 */
4483         max = max_size;
4484         while ((max & num_rings) == 0)
4485                 max >>= 1;
4486
4487         if (num_rings != max)
4488                 max <<= 1;
4489
4490         return max;
4491 }
4492
4493 static void
4494 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4495 {
4496         u32 rx_size, rx_space, jumbo_size;
4497
4498         /* 8 for CRC and VLAN */
4499         rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4500
4501         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4502                 sizeof(struct skb_shared_info);
4503
4504         bp->rx_copy_thresh = RX_COPY_THRESH;
4505         bp->rx_pg_ring_size = 0;
4506         bp->rx_max_pg_ring = 0;
4507         bp->rx_max_pg_ring_idx = 0;
4508         if (rx_space > PAGE_SIZE) {
4509                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4510
4511                 jumbo_size = size * pages;
4512                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4513                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4514
4515                 bp->rx_pg_ring_size = jumbo_size;
4516                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4517                                                         MAX_RX_PG_RINGS);
4518                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4519                 rx_size = RX_COPY_THRESH + bp->rx_offset;
4520                 bp->rx_copy_thresh = 0;
4521         }
4522
4523         bp->rx_buf_use_size = rx_size;
4524         /* hw alignment */
4525         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4526         bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4527         bp->rx_ring_size = size;
4528         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4529         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4530 }
4531
/* Unmap and free every skb still owned by the TX ring.  Safe to call
 * when the ring was never allocated.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* The first BD maps the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* The following BDs map the skb's page fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip over this skb's fragment BDs. */
		i += j + 1;
	}

}
4568
4569 static void
4570 bnx2_free_rx_skbs(struct bnx2 *bp)
4571 {
4572         int i;
4573
4574         if (bp->rx_buf_ring == NULL)
4575                 return;
4576
4577         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4578                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4579                 struct sk_buff *skb = rx_buf->skb;
4580
4581                 if (skb == NULL)
4582                         continue;
4583
4584                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4585                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4586
4587                 rx_buf->skb = NULL;
4588
4589                 dev_kfree_skb(skb);
4590         }
4591         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4592                 bnx2_free_rx_page(bp, i);
4593 }
4594
/* Release all TX and RX buffers still held by the driver's rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4601
4602 static int
4603 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4604 {
4605         int rc;
4606
4607         rc = bnx2_reset_chip(bp, reset_code);
4608         bnx2_free_skbs(bp);
4609         if (rc)
4610                 return rc;
4611
4612         if ((rc = bnx2_init_chip(bp)) != 0)
4613                 return rc;
4614
4615         bnx2_init_tx_ring(bp);
4616         bnx2_init_rx_ring(bp);
4617         return 0;
4618 }
4619
4620 static int
4621 bnx2_init_nic(struct bnx2 *bp)
4622 {
4623         int rc;
4624
4625         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4626                 return rc;
4627
4628         spin_lock_bh(&bp->phy_lock);
4629         bnx2_init_phy(bp);
4630         bnx2_set_link(bp);
4631         spin_unlock_bh(&bp->phy_lock);
4632         return 0;
4633 }
4634
/* Ethtool self-test: for each register in the table, verify that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Returns 0 on success,
 * -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel: end of table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Writing 0 must clear all rw bits and leave ro bits. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all-ones must set all rw bits and leave ro bits. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4805
4806 static int
4807 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4808 {
4809         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4810                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4811         int i;
4812
4813         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4814                 u32 offset;
4815
4816                 for (offset = 0; offset < size; offset += 4) {
4817
4818                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4819
4820                         if (REG_RD_IND(bp, start + offset) !=
4821                                 test_pattern[i]) {
4822                                 return -ENODEV;
4823                         }
4824                 }
4825         }
4826         return 0;
4827 }
4828
4829 static int
4830 bnx2_test_memory(struct bnx2 *bp)
4831 {
4832         int ret = 0;
4833         int i;
4834         static struct mem_entry {
4835                 u32   offset;
4836                 u32   len;
4837         } mem_tbl_5706[] = {
4838                 { 0x60000,  0x4000 },
4839                 { 0xa0000,  0x3000 },
4840                 { 0xe0000,  0x4000 },
4841                 { 0x120000, 0x4000 },
4842                 { 0x1a0000, 0x4000 },
4843                 { 0x160000, 0x4000 },
4844                 { 0xffffffff, 0    },
4845         },
4846         mem_tbl_5709[] = {
4847                 { 0x60000,  0x4000 },
4848                 { 0xa0000,  0x3000 },
4849                 { 0xe0000,  0x4000 },
4850                 { 0x120000, 0x4000 },
4851                 { 0x1a0000, 0x4000 },
4852                 { 0xffffffff, 0    },
4853         };
4854         struct mem_entry *mem_tbl;
4855
4856         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4857                 mem_tbl = mem_tbl_5709;
4858         else
4859                 mem_tbl = mem_tbl_5706;
4860
4861         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4862                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4863                         mem_tbl[i].len)) != 0) {
4864                         return ret;
4865                 }
4866         }
4867
4868         return ret;
4869 }
4870
4871 #define BNX2_MAC_LOOPBACK       0
4872 #define BNX2_PHY_LOOPBACK       1
4873
/* Ethtool self-test: send one frame through the chip in MAC or PHY
 * loopback and verify it returns intact on the RX ring.  Returns 0 on
 * success, -EINVAL for an unknown mode, -ENOMEM if no skb could be
 * allocated, and -ENODEV (the initial value of ret) on any completion
 * or data mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* A remotely managed PHY cannot be looped back locally;
		 * report success without testing. */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the frame below the jumbo S/G threshold so it arrives
	 * entirely in one RX buffer. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = our own address; payload = counting pattern. */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update to get a stable starting RX index. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Build a single TX BD describing the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Coalesce again to pick up the TX completion and the RX frame. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX BD must have been consumed... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames must have arrived. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip writes an l2_fhdr ahead of the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length (minus 4-byte CRC) and payload must match what we sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4995
4996 #define BNX2_MAC_LOOPBACK_FAILED        1
4997 #define BNX2_PHY_LOOPBACK_FAILED        2
4998 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4999                                          BNX2_PHY_LOOPBACK_FAILED)
5000
5001 static int
5002 bnx2_test_loopback(struct bnx2 *bp)
5003 {
5004         int rc = 0;
5005
5006         if (!netif_running(bp->dev))
5007                 return BNX2_LOOPBACK_FAILED;
5008
5009         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5010         spin_lock_bh(&bp->phy_lock);
5011         bnx2_init_phy(bp);
5012         spin_unlock_bh(&bp->phy_lock);
5013         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5014                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5015         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5016                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5017         return rc;
5018 }
5019
5020 #define NVRAM_SIZE 0x200
5021 #define CRC32_RESIDUAL 0xdebb20e3
5022
5023 static int
5024 bnx2_test_nvram(struct bnx2 *bp)
5025 {
5026         u32 buf[NVRAM_SIZE / 4];
5027         u8 *data = (u8 *) buf;
5028         int rc = 0;
5029         u32 magic, csum;
5030
5031         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5032                 goto test_nvram_done;
5033
5034         magic = be32_to_cpu(buf[0]);
5035         if (magic != 0x669955aa) {
5036                 rc = -ENODEV;
5037                 goto test_nvram_done;
5038         }
5039
5040         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5041                 goto test_nvram_done;
5042
5043         csum = ether_crc_le(0x100, data);
5044         if (csum != CRC32_RESIDUAL) {
5045                 rc = -ENODEV;
5046                 goto test_nvram_done;
5047         }
5048
5049         csum = ether_crc_le(0x100, data + 0x100);
5050         if (csum != CRC32_RESIDUAL) {
5051                 rc = -ENODEV;
5052         }
5053
5054 test_nvram_done:
5055         return rc;
5056 }
5057
5058 static int
5059 bnx2_test_link(struct bnx2 *bp)
5060 {
5061         u32 bmsr;
5062
5063         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5064                 if (bp->link_up)
5065                         return 0;
5066                 return -ENODEV;
5067         }
5068         spin_lock_bh(&bp->phy_lock);
5069         bnx2_enable_bmsr1(bp);
5070         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5071         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5072         bnx2_disable_bmsr1(bp);
5073         spin_unlock_bh(&bp->phy_lock);
5074
5075         if (bmsr & BMSR_LSTATUS) {
5076                 return 0;
5077         }
5078         return -ENODEV;
5079 }
5080
5081 static int
5082 bnx2_test_intr(struct bnx2 *bp)
5083 {
5084         int i;
5085         u16 status_idx;
5086
5087         if (!netif_running(bp->dev))
5088                 return -ENODEV;
5089
5090         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5091
5092         /* This register is not touched during run-time. */
5093         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5094         REG_RD(bp, BNX2_HC_COMMAND);
5095
5096         for (i = 0; i < 10; i++) {
5097                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5098                         status_idx) {
5099
5100                         break;
5101                 }
5102
5103                 msleep_interruptible(10);
5104         }
5105         if (i < 10)
5106                 return 0;
5107
5108         return -ENODEV;
5109 }
5110
/* Per-tick autoneg helper for 5706 SerDes PHYs.  Implements parallel
 * detection: if signal is present but the partner sends no config
 * words, force 1Gb full duplex; once forced, fall back to autoneg
 * when config words appear again.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific shadow registers 0x1c/0x17/0x15
			 * select the status words tested below.
			 * NOTE(review): exact register semantics come from
			 * Broadcom documentation - confirm there.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1000 Mb full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up in forced mode; if the partner now sends
		 * config words, re-enable autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5165
/* Per-tick autoneg helper for 5708 SerDes PHYs.  While the link is
 * down, alternate between forced 2.5Gb mode and normal autonegotiation
 * until the link comes up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* A remote firmware-managed PHY handles link setup itself. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not bring the link up; try forced
			 * 2.5Gb for SERDES_FORCED_TIMEOUT ticks.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too; return to autoneg and
			 * skip the next two ticks before re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5198
/* Periodic driver timer: sends the firmware heartbeat, refreshes the
 * firmware Rx drop counter, applies a 5708 statistics workaround, and
 * runs the SerDes link state machines.  Always rearms itself while
 * the interface is running.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work while interrupts are blocked (e.g. during reset),
	 * but keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5229
5230 static int
5231 bnx2_request_irq(struct bnx2 *bp)
5232 {
5233         struct net_device *dev = bp->dev;
5234         int rc = 0;
5235
5236         if (bp->flags & USING_MSI_FLAG) {
5237                 irq_handler_t   fn = bnx2_msi;
5238
5239                 if (bp->flags & ONE_SHOT_MSI_FLAG)
5240                         fn = bnx2_msi_1shot;
5241
5242                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5243         } else
5244                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5245                                  IRQF_SHARED, dev->name, dev);
5246         return rc;
5247 }
5248
5249 static void
5250 bnx2_free_irq(struct bnx2 *bp)
5251 {
5252         struct net_device *dev = bp->dev;
5253
5254         if (bp->flags & USING_MSI_FLAG) {
5255                 free_irq(bp->pdev->irq, dev);
5256                 pci_disable_msi(bp->pdev);
5257                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5258         } else
5259                 free_irq(bp->pdev->irq, dev);
5260 }
5261
/* Called with rtnl_lock.  Brings the interface up: allocates rings,
 * requests the IRQ (preferring MSI), initializes the chip, and
 * verifies MSI delivery - falling back to INTx if the MSI test fails.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	napi_enable(&bp->napi);

	/* Prefer MSI when supported and not administratively disabled;
	 * 5709 additionally uses one-shot MSI.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			/* bnx2_free_irq() also disables MSI and clears the
			 * MSI flags, so the reinit below falls back to INTx.
			 */
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				napi_disable(&bp->napi);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5348
/* Workqueue handler that resets and reinitializes the chip (scheduled
 * from bnx2_tx_timeout).  in_reset_task lets bnx2_close() wait for us
 * instead of flushing the workqueue, which could deadlock on rtnl_lock.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Block interrupt handling until bnx2_netif_start() re-enables it. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5366
/* net_device tx_timeout hook: defer the chip reset to process context
 * via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5375
#ifdef BCM_VLAN
/* Called with rtnl_lock.  Installs the new VLAN group and reprograms
 * the Rx mode; the device is quiesced around the switch so the Rx path
 * never observes a half-updated vlgrp pointer.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5391
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: maps the skb (head + fragments) into Tx buffer
 * descriptors, encodes checksum/VLAN/TSO flags, and rings the doorbell.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring filled up;
	 * reaching here without enough descriptors is a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	/* TSO: encode MSS, TCP option length and (for IPv6) the transport
	 * header offset into the BD flag/mss fields for the chip.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* Offset of the TCP header beyond the fixed IPv6
			 * header (non-zero when extension headers exist);
			 * split across several BD flag bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: headers are rewritten below, so make
			 * sure the header area is not shared.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Pre-compute per-segment IP length and the TCP
			 * pseudo-header checksum for the hardware.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First BD covers the linear head and carries the START flag. */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: publish the new producer index and byte
	 * sequence to the chip.
	 */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when a max-fragment packet may no longer fit;
	 * re-wake immediately if bnx2_tx_int() freed enough in between.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5530
5531 /* Called with rtnl_lock */
5532 static int
5533 bnx2_close(struct net_device *dev)
5534 {
5535         struct bnx2 *bp = netdev_priv(dev);
5536         u32 reset_code;
5537
5538         /* Calling flush_scheduled_work() may deadlock because
5539          * linkwatch_event() may be on the workqueue and it will try to get
5540          * the rtnl_lock which we are holding.
5541          */
5542         while (bp->in_reset_task)
5543                 msleep(1);
5544
5545         bnx2_disable_int_sync(bp);
5546         napi_disable(&bp->napi);
5547         del_timer_sync(&bp->timer);
5548         if (bp->flags & NO_WOL_FLAG)
5549                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5550         else if (bp->wol)
5551                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5552         else
5553                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5554         bnx2_reset_chip(bp, reset_code);
5555         bnx2_free_irq(bp);
5556         bnx2_free_skbs(bp);
5557         bnx2_free_mem(bp);
5558         bp->link_up = 0;
5559         netif_carrier_off(bp->dev);
5560         bnx2_set_power_state(bp, PCI_D3hot);
5561         return 0;
5562 }
5563
5564 #define GET_NET_STATS64(ctr)                                    \
5565         (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
5566         (unsigned long) (ctr##_lo)
5567
5568 #define GET_NET_STATS32(ctr)            \
5569         (ctr##_lo)
5570
5571 #if (BITS_PER_LONG == 64)
5572 #define GET_NET_STATS   GET_NET_STATS64
5573 #else
5574 #define GET_NET_STATS   GET_NET_STATS32
5575 #endif
5576
5577 static struct net_device_stats *
5578 bnx2_get_stats(struct net_device *dev)
5579 {
5580         struct bnx2 *bp = netdev_priv(dev);
5581         struct statistics_block *stats_blk = bp->stats_blk;
5582         struct net_device_stats *net_stats = &bp->net_stats;
5583
5584         if (bp->stats_blk == NULL) {
5585                 return net_stats;
5586         }
5587         net_stats->rx_packets =
5588                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5589                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5590                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5591
5592         net_stats->tx_packets =
5593                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5594                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5595                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5596
5597         net_stats->rx_bytes =
5598                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5599
5600         net_stats->tx_bytes =
5601                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5602
5603         net_stats->multicast =
5604                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5605
5606         net_stats->collisions =
5607                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5608
5609         net_stats->rx_length_errors =
5610                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5611                 stats_blk->stat_EtherStatsOverrsizePkts);
5612
5613         net_stats->rx_over_errors =
5614                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5615
5616         net_stats->rx_frame_errors =
5617                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5618
5619         net_stats->rx_crc_errors =
5620                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5621
5622         net_stats->rx_errors = net_stats->rx_length_errors +
5623                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5624                 net_stats->rx_crc_errors;
5625
5626         net_stats->tx_aborted_errors =
5627                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5628                 stats_blk->stat_Dot3StatsLateCollisions);
5629
5630         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5631             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5632                 net_stats->tx_carrier_errors = 0;
5633         else {
5634                 net_stats->tx_carrier_errors =
5635                         (unsigned long)
5636                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
5637         }
5638
5639         net_stats->tx_errors =
5640                 (unsigned long)
5641                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5642                 +
5643                 net_stats->tx_aborted_errors +
5644                 net_stats->tx_carrier_errors;
5645
5646         net_stats->rx_missed_errors =
5647                 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5648                 stats_blk->stat_FwRxDrop);
5649
5650         return net_stats;
5651 }
5652
5653 /* All ethtool functions called with rtnl_lock */
5654
5655 static int
5656 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5657 {
5658         struct bnx2 *bp = netdev_priv(dev);
5659         int support_serdes = 0, support_copper = 0;
5660
5661         cmd->supported = SUPPORTED_Autoneg;
5662         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5663                 support_serdes = 1;
5664                 support_copper = 1;
5665         } else if (bp->phy_port == PORT_FIBRE)
5666                 support_serdes = 1;
5667         else
5668                 support_copper = 1;
5669
5670         if (support_serdes) {
5671                 cmd->supported |= SUPPORTED_1000baseT_Full |
5672                         SUPPORTED_FIBRE;
5673                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5674                         cmd->supported |= SUPPORTED_2500baseX_Full;
5675
5676         }
5677         if (support_copper) {
5678                 cmd->supported |= SUPPORTED_10baseT_Half |
5679                         SUPPORTED_10baseT_Full |
5680                         SUPPORTED_100baseT_Half |
5681                         SUPPORTED_100baseT_Full |
5682                         SUPPORTED_1000baseT_Full |
5683                         SUPPORTED_TP;
5684
5685         }
5686
5687         spin_lock_bh(&bp->phy_lock);
5688         cmd->port = bp->phy_port;
5689         cmd->advertising = bp->advertising;
5690
5691         if (bp->autoneg & AUTONEG_SPEED) {
5692                 cmd->autoneg = AUTONEG_ENABLE;
5693         }
5694         else {
5695                 cmd->autoneg = AUTONEG_DISABLE;
5696         }
5697
5698         if (netif_carrier_ok(dev)) {
5699                 cmd->speed = bp->line_speed;
5700                 cmd->duplex = bp->duplex;
5701         }
5702         else {
5703                 cmd->speed = -1;
5704                 cmd->duplex = -1;
5705         }
5706         spin_unlock_bh(&bp->phy_lock);
5707
5708         cmd->transceiver = XCVR_INTERNAL;
5709         cmd->phy_address = bp->phy_addr;
5710
5711         return 0;
5712 }
5713
/* ethtool set_settings hook: validate the requested port, autoneg,
 * advertised modes, speed and duplex against the PHY's capabilities,
 * then apply them with bnx2_setup_phy().  Called with rtnl_lock held.
 * Returns 0 on success or -EINVAL for an unsupported combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the port type is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5Gb requires a capable PHY and fibre port. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the port
			 * type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (cmd->port == PORT_FIBRE) {
			/* Forced fibre: only 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		/* Forced 1000/2500 is not allowed on copper. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5796
5797 static void
5798 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5799 {
5800         struct bnx2 *bp = netdev_priv(dev);
5801
5802         strcpy(info->driver, DRV_MODULE_NAME);
5803         strcpy(info->version, DRV_MODULE_VERSION);
5804         strcpy(info->bus_info, pci_name(bp->pdev));
5805         strcpy(info->fw_version, bp->fw_version);
5806 }
5807
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len hook: size of the buffer bnx2_get_regs fills. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5815
/* ethtool get_regs: dump device registers into the caller's buffer _p
 * (BNX2_REGDUMP_LEN bytes).  Only selected register windows are read;
 * the gaps between them are left zero-filled.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of [start, end) offsets of readable register ranges.
	 * The final 0x8000 terminates the walk (>= BNX2_REGDUMP_LEN).
	 */
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	/* Zero the whole buffer so skipped ranges read back as 0. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Device registers are only accessible while the NIC is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of this range: jump to the start of the next one,
		 * repositioning p so dump offsets mirror register offsets.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
5865
5866 static void
5867 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5868 {
5869         struct bnx2 *bp = netdev_priv(dev);
5870
5871         if (bp->flags & NO_WOL_FLAG) {
5872                 wol->supported = 0;
5873                 wol->wolopts = 0;
5874         }
5875         else {
5876                 wol->supported = WAKE_MAGIC;
5877                 if (bp->wol)
5878                         wol->wolopts = WAKE_MAGIC;
5879                 else
5880                         wol->wolopts = 0;
5881         }
5882         memset(&wol->sopass, 0, sizeof(wol->sopass));
5883 }
5884
5885 static int
5886 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5887 {
5888         struct bnx2 *bp = netdev_priv(dev);
5889
5890         if (wol->wolopts & ~WAKE_MAGIC)
5891                 return -EINVAL;
5892
5893         if (wol->wolopts & WAKE_MAGIC) {
5894                 if (bp->flags & NO_WOL_FLAG)
5895                         return -EINVAL;
5896
5897                 bp->wol = 1;
5898         }
5899         else {
5900                 bp->wol = 0;
5901         }
5902         return 0;
5903 }
5904
/* ethtool nway_reset: restart link autonegotiation.  Fails with
 * -EINVAL unless autoneg is currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY devices: let the management firmware redo the
	 * link setup instead of touching MII registers directly.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while the 20ms link-down settle time
		 * elapses; msleep() cannot be called under a spinlock.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg watchdog in the driver timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback (if set above) and kick off autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5947
5948 static int
5949 bnx2_get_eeprom_len(struct net_device *dev)
5950 {
5951         struct bnx2 *bp = netdev_priv(dev);
5952
5953         if (bp->flash_info == NULL)
5954                 return 0;
5955
5956         return (int) bp->flash_size;
5957 }
5958
5959 static int
5960 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5961                 u8 *eebuf)
5962 {
5963         struct bnx2 *bp = netdev_priv(dev);
5964         int rc;
5965
5966         /* parameters already validated in ethtool_get_eeprom */
5967
5968         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5969
5970         return rc;
5971 }
5972
5973 static int
5974 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5975                 u8 *eebuf)
5976 {
5977         struct bnx2 *bp = netdev_priv(dev);
5978         int rc;
5979
5980         /* parameters already validated in ethtool_set_eeprom */
5981
5982         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5983
5984         return rc;
5985 }
5986
5987 static int
5988 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5989 {
5990         struct bnx2 *bp = netdev_priv(dev);
5991
5992         memset(coal, 0, sizeof(struct ethtool_coalesce));
5993
5994         coal->rx_coalesce_usecs = bp->rx_ticks;
5995         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5996         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5997         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5998
5999         coal->tx_coalesce_usecs = bp->tx_ticks;
6000         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6001         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6002         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6003
6004         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6005
6006         return 0;
6007 }
6008
6009 static int
6010 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6011 {
6012         struct bnx2 *bp = netdev_priv(dev);
6013
6014         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6015         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6016
6017         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6018         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6019
6020         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6021         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6022
6023         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6024         if (bp->rx_quick_cons_trip_int > 0xff)
6025                 bp->rx_quick_cons_trip_int = 0xff;
6026
6027         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6028         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6029
6030         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6031         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6032
6033         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6034         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6035
6036         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6037         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6038                 0xff;
6039
6040         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6041         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6042                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6043                         bp->stats_ticks = USEC_PER_SEC;
6044         }
6045         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6046                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6047         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6048
6049         if (netif_running(bp->dev)) {
6050                 bnx2_netif_stop(bp);
6051                 bnx2_init_nic(bp);