[BNX2]: Introduce new bnx2_napi structure.
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.0"
60 #define DRV_MODULE_RELDATE      "December 11, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static const char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80 typedef enum {
81         BCM5706 = 0,
82         NC370T,
83         NC370I,
84         BCM5706S,
85         NC370F,
86         BCM5708,
87         BCM5708S,
88         BCM5709,
89         BCM5709S,
90 } board_t;
91
92 /* indexed by board_t, above */
93 static const struct {
94         char *name;
95 } board_info[] __devinitdata = {
96         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97         { "HP NC370T Multifunction Gigabit Server Adapter" },
98         { "HP NC370i Multifunction Gigabit Server Adapter" },
99         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100         { "HP NC370F Multifunction Gigabit Server Adapter" },
101         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105         };
106
107 static struct pci_device_id bnx2_pci_tbl[] = {
108         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
126         { 0, }
127 };
128
129 static struct flash_spec flash_table[] =
130 {
131 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
133         /* Slow EEPROM */
134         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137          "EEPROM - slow"},
138         /* Expansion entry 0001 */
139         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142          "Entry 0001"},
143         /* Saifun SA25F010 (non-buffered flash) */
144         /* strap, cfg1, & write1 need updates */
145         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148          "Non-buffered flash (128kB)"},
149         /* Saifun SA25F020 (non-buffered flash) */
150         /* strap, cfg1, & write1 need updates */
151         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154          "Non-buffered flash (256kB)"},
155         /* Expansion entry 0100 */
156         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159          "Entry 0100"},
160         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170         /* Saifun SA25F005 (non-buffered flash) */
171         /* strap, cfg1, & write1 need updates */
172         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175          "Non-buffered flash (64kB)"},
176         /* Fast EEPROM */
177         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180          "EEPROM - fast"},
181         /* Expansion entry 1001 */
182         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185          "Entry 1001"},
186         /* Expansion entry 1010 */
187         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190          "Entry 1010"},
191         /* ATMEL AT45DB011B (buffered flash) */
192         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195          "Buffered flash (128kB)"},
196         /* Expansion entry 1100 */
197         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200          "Entry 1100"},
201         /* Expansion entry 1101 */
202         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205          "Entry 1101"},
206         /* Ateml Expansion entry 1110 */
207         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210          "Entry 1110 (Atmel)"},
211         /* ATMEL AT45DB021B (buffered flash) */
212         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215          "Buffered flash (256kB)"},
216 };
217
218 static struct flash_spec flash_5709 = {
219         .flags          = BNX2_NV_BUFFERED,
220         .page_bits      = BCM5709_FLASH_PAGE_BITS,
221         .page_size      = BCM5709_FLASH_PAGE_SIZE,
222         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
223         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
224         .name           = "5709 Buffered flash (256kB)",
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bp->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Read a device register indirectly through the PCI config register
 * window.  indirect_lock serializes use of the shared window-address
 * register, so the address write and data read stay paired.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
258
/* Write a device register indirectly through the PCI config register
 * window, under the same indirect_lock as bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read a PHY register via the EMAC MDIO interface.
 *
 * Returns 0 with the register contents in *val on success, or -EBUSY
 * (with *val zeroed) if the transaction does not complete within the
 * 50 x 10us polling window.  If the chip is auto-polling the PHY,
 * polling is suspended around the manual access and restored after.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Turn off hardware auto-polling so it does not collide
		 * with our manual MDIO transaction.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Compose the MDIO command: PHY address, register number, and the
	 * START_BUSY bit that kicks off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for the hardware to clear START_BUSY, then re-read to
	 * pick up the returned data bits.
	 */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	return ret;
}
349
/* Write a PHY register via the EMAC MDIO interface.
 *
 * Returns 0 on success or -EBUSY if the transaction does not complete
 * within the 50 x 10us polling window.  Hardware auto-polling of the
 * PHY, if active, is suspended around the manual access and restored
 * before returning (mirrors bnx2_read_phy()).
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Turn off hardware auto-polling during the manual access. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Compose the MDIO command: PHY address, register, write data, and
	 * the START_BUSY bit that kicks off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for the hardware to clear START_BUSY. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	return ret;
}
398
/* Mask chip interrupts.  The read-back flushes the posted write so the
 * mask is guaranteed to have reached the chip before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Re-enable chip interrupts.
 *
 * First write acks events up to last_status_idx while still masked;
 * the second write, without MASK_INT, unmasks the interrupt.  The
 * final COAL_NOW command makes the host-coalescing block fire
 * immediately — presumably to deliver any events that arrived while
 * interrupts were masked (TODO confirm against chip docs).
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bnapi->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
421
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first; bnx2_netif_start() only re-enables when it
 * decrements the count back to zero, so nested stop/start pairs nest.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
429
430 static void
431 bnx2_napi_disable(struct bnx2 *bp)
432 {
433         napi_disable(&bp->bnx2_napi.napi);
434 }
435
436 static void
437 bnx2_napi_enable(struct bnx2 *bp)
438 {
439         napi_enable(&bp->bnx2_napi.napi);
440 }
441
442 static void
443 bnx2_netif_stop(struct bnx2 *bp)
444 {
445         bnx2_disable_int_sync(bp);
446         if (netif_running(bp->dev)) {
447                 bnx2_napi_disable(bp);
448                 netif_tx_disable(bp->dev);
449                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
450         }
451 }
452
453 static void
454 bnx2_netif_start(struct bnx2 *bp)
455 {
456         if (atomic_dec_and_test(&bp->intr_sem)) {
457                 if (netif_running(bp->dev)) {
458                         netif_wake_queue(bp->dev);
459                         bnx2_napi_enable(bp);
460                         bnx2_enable_int(bp);
461                 }
462         }
463 }
464
465 static void
466 bnx2_free_mem(struct bnx2 *bp)
467 {
468         int i;
469
470         for (i = 0; i < bp->ctx_pages; i++) {
471                 if (bp->ctx_blk[i]) {
472                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
473                                             bp->ctx_blk[i],
474                                             bp->ctx_blk_mapping[i]);
475                         bp->ctx_blk[i] = NULL;
476                 }
477         }
478         if (bp->status_blk) {
479                 pci_free_consistent(bp->pdev, bp->status_stats_size,
480                                     bp->status_blk, bp->status_blk_mapping);
481                 bp->status_blk = NULL;
482                 bp->stats_blk = NULL;
483         }
484         if (bp->tx_desc_ring) {
485                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
486                                     bp->tx_desc_ring, bp->tx_desc_mapping);
487                 bp->tx_desc_ring = NULL;
488         }
489         kfree(bp->tx_buf_ring);
490         bp->tx_buf_ring = NULL;
491         for (i = 0; i < bp->rx_max_ring; i++) {
492                 if (bp->rx_desc_ring[i])
493                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
494                                             bp->rx_desc_ring[i],
495                                             bp->rx_desc_mapping[i]);
496                 bp->rx_desc_ring[i] = NULL;
497         }
498         vfree(bp->rx_buf_ring);
499         bp->rx_buf_ring = NULL;
500         for (i = 0; i < bp->rx_max_pg_ring; i++) {
501                 if (bp->rx_pg_desc_ring[i])
502                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
503                                             bp->rx_pg_desc_ring[i],
504                                             bp->rx_pg_desc_mapping[i]);
505                 bp->rx_pg_desc_ring[i] = NULL;
506         }
507         if (bp->rx_pg_ring)
508                 vfree(bp->rx_pg_ring);
509         bp->rx_pg_ring = NULL;
510 }
511
/* Allocate all TX/RX rings, the combined status + statistics block,
 * and (on 5709) the context memory pages.  Returns 0 or -ENOMEM; on
 * any failure everything allocated so far is released through
 * bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* rx_pg_ring_size may be zero, in which case the page ring is
	 * not used and is left unallocated.
	 */
	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* The NAPI context shares the device's status block. */
	bp->bnx2_napi.status_blk = bp->status_blk;

	/* Statistics live in the cache-aligned tail of the same buffer. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 context memory: 0x2000 bytes split into host pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
597
/* Translate the driver's current link state into the BNX2_LINK_STATUS
 * encoding and post it to the bootcode via shared memory.  Skipped
 * entirely when the PHY is managed remotely.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Map (line_speed, duplex) onto the firmware's encoding. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice: MII link-status bits are
			 * latched, so the first read may return stale
			 * state (standard MII behavior).
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
656
657 static char *
658 bnx2_xceiver_str(struct bnx2 *bp)
659 {
660         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
661                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
662                  "Copper"));
663 }
664
/* Log the link transition, update the net-device carrier state, and
 * mirror the state to the bootcode via bnx2_report_fw_link().  The
 * up-message is built from continuation printks, so the calls must
 * stay in this exact order.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Report pause: ", receive & transmit", ", receive", or
		 * ", transmit" depending on which directions are active.
		 */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
701
/* Derive bp->flow_ctrl (FLOW_CTRL_TX / FLOW_CTRL_RX bits) from either
 * the administratively requested setting or the autonegotiated pause
 * advertisements.  Pause is only meaningful in full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If speed or flow control was not fully autonegotiated, use the
	 * requested setting directly.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause result directly
	 * in its status register — no table lookup needed.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* SerDes advertises pause with the 1000BASE-X bits; remap them to
	 * the copper ADVERTISE_PAUSE_* encoding so the resolution logic
	 * below serves both media.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
777
778 static int
779 bnx2_5709s_linkup(struct bnx2 *bp)
780 {
781         u32 val, speed;
782
783         bp->link_up = 1;
784
785         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
786         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
787         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
788
789         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
790                 bp->line_speed = bp->req_line_speed;
791                 bp->duplex = bp->req_duplex;
792                 return 0;
793         }
794         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
795         switch (speed) {
796                 case MII_BNX2_GP_TOP_AN_SPEED_10:
797                         bp->line_speed = SPEED_10;
798                         break;
799                 case MII_BNX2_GP_TOP_AN_SPEED_100:
800                         bp->line_speed = SPEED_100;
801                         break;
802                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
803                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
804                         bp->line_speed = SPEED_1000;
805                         break;
806                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
807                         bp->line_speed = SPEED_2500;
808                         break;
809         }
810         if (val & MII_BNX2_GP_TOP_AN_FD)
811                 bp->duplex = DUPLEX_FULL;
812         else
813                 bp->duplex = DUPLEX_HALF;
814         return 0;
815 }
816
817 static int
818 bnx2_5708s_linkup(struct bnx2 *bp)
819 {
820         u32 val;
821
822         bp->link_up = 1;
823         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
824         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
825                 case BCM5708S_1000X_STAT1_SPEED_10:
826                         bp->line_speed = SPEED_10;
827                         break;
828                 case BCM5708S_1000X_STAT1_SPEED_100:
829                         bp->line_speed = SPEED_100;
830                         break;
831                 case BCM5708S_1000X_STAT1_SPEED_1G:
832                         bp->line_speed = SPEED_1000;
833                         break;
834                 case BCM5708S_1000X_STAT1_SPEED_2G5:
835                         bp->line_speed = SPEED_2500;
836                         break;
837         }
838         if (val & BCM5708S_1000X_STAT1_FD)
839                 bp->duplex = DUPLEX_FULL;
840         else
841                 bp->duplex = DUPLEX_HALF;
842
843         return 0;
844 }
845
846 static int
847 bnx2_5706s_linkup(struct bnx2 *bp)
848 {
849         u32 bmcr, local_adv, remote_adv, common;
850
851         bp->link_up = 1;
852         bp->line_speed = SPEED_1000;
853
854         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
855         if (bmcr & BMCR_FULLDPLX) {
856                 bp->duplex = DUPLEX_FULL;
857         }
858         else {
859                 bp->duplex = DUPLEX_HALF;
860         }
861
862         if (!(bmcr & BMCR_ANENABLE)) {
863                 return 0;
864         }
865
866         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
867         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
868
869         common = local_adv & remote_adv;
870         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
871
872                 if (common & ADVERTISE_1000XFULL) {
873                         bp->duplex = DUPLEX_FULL;
874                 }
875                 else {
876                         bp->duplex = DUPLEX_HALF;
877                 }
878         }
879
880         return 0;
881 }
882
/* Determine line speed and duplex after a copper PHY link-up, either
 * from the autoneg result registers or, when autoneg is disabled,
 * from the forced bits in BMCR.  Clears bp->link_up if autoneg
 * completed with no common ability.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Try the 1000BASE-T abilities first. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The link-partner ability bits in STAT1000 sit two
		 * positions above the matching CTRL1000 advertisement
		 * bits, so shift them down before ANDing.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to the 10/100
			 * advertisement registers, fastest mode first.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Autoneg completed but nothing in
				 * common; treat as link down.
				 */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: speed and duplex come straight
		 * from the forced BMCR bits.
		 */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
948
/* Program the EMAC to match the resolved link state: TX length
 * tuning, port mode, duplex and RX/TX pause, then acknowledge the
 * EMAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths value, overridden for 1G half duplex.
	 * NOTE(review): 0x2620/0x26ff are vendor magic (slot time/IPG
	 * fields) -- confirm against the EMAC register documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips other than the 5706 have a
				 * dedicated 10M MII port mode; the 5706
				 * uses plain MII mode for 10M too.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1015
1016 static void
1017 bnx2_enable_bmsr1(struct bnx2 *bp)
1018 {
1019         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1020             (CHIP_NUM(bp) == CHIP_NUM_5709))
1021                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1022                                MII_BNX2_BLK_ADDR_GP_STATUS);
1023 }
1024
1025 static void
1026 bnx2_disable_bmsr1(struct bnx2 *bp)
1027 {
1028         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1029             (CHIP_NUM(bp) == CHIP_NUM_5709))
1030                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1031                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1032 }
1033
/* Turn on 2.5G advertisement in the PHY's UP1 register (and add it to
 * bp->advertising when autonegotiating).  Returns 1 if 2.5G was
 * already advertised; 0 if the bit had to be newly set (caller may
 * need to force renegotiation) or if the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On the 5709 the UP1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1062
/* Turn off 2.5G advertisement in the PHY's UP1 register.  Returns 1
 * if the bit was set and has been cleared (caller may need to force
 * renegotiation); 0 if it was already clear or the PHY is not 2.5G
 * capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	/* On the 5709 the UP1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1088
1089 static void
1090 bnx2_enable_forced_2g5(struct bnx2 *bp)
1091 {
1092         u32 bmcr;
1093
1094         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095                 return;
1096
1097         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098                 u32 val;
1099
1100                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1101                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1102                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1103                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1104                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1105                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1106
1107                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1108                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1109                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110
1111         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1112                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1113                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1114         }
1115
1116         if (bp->autoneg & AUTONEG_SPEED) {
1117                 bmcr &= ~BMCR_ANENABLE;
1118                 if (bp->req_duplex == DUPLEX_FULL)
1119                         bmcr |= BMCR_FULLDPLX;
1120         }
1121         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1122 }
1123
1124 static void
1125 bnx2_disable_forced_2g5(struct bnx2 *bp)
1126 {
1127         u32 bmcr;
1128
1129         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1130                 return;
1131
1132         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1133                 u32 val;
1134
1135                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1136                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1137                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1138                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1139                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1148         }
1149
1150         if (bp->autoneg & AUTONEG_SPEED)
1151                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1152         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1153 }
1154
/* Poll the PHY, update the driver's link state (speed, duplex, flow
 * control) and reprogram the MAC accordingly.  Skipped entirely in
 * MAC/PHY loopback mode and for remote-PHY configurations (whose link
 * state arrives via firmware events).  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY link changes are handled in bnx2_remote_phy_event(). */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched; read it twice so the second
	 * read reflects the current state.  The enable/disable pair
	 * selects the right register block on 5709 SerDes.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, override the PHY's link bit with the EMAC's
	 * view of link status.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific link-up decoding for SerDes PHYs. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: undo any forced 2.5G setting so autoneg
		 * can proceed when the link comes back.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log/report when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1220
1221 static int
1222 bnx2_reset_phy(struct bnx2 *bp)
1223 {
1224         int i;
1225         u32 reg;
1226
1227         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1228
1229 #define PHY_RESET_MAX_WAIT 100
1230         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1231                 udelay(10);
1232
1233                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1234                 if (!(reg & BMCR_RESET)) {
1235                         udelay(20);
1236                         break;
1237                 }
1238         }
1239         if (i == PHY_RESET_MAX_WAIT) {
1240                 return -EBUSY;
1241         }
1242         return 0;
1243 }
1244
1245 static u32
1246 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1247 {
1248         u32 adv = 0;
1249
1250         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1251                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1252
1253                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254                         adv = ADVERTISE_1000XPAUSE;
1255                 }
1256                 else {
1257                         adv = ADVERTISE_PAUSE_CAP;
1258                 }
1259         }
1260         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1261                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1262                         adv = ADVERTISE_1000XPSE_ASYM;
1263                 }
1264                 else {
1265                         adv = ADVERTISE_PAUSE_ASYM;
1266                 }
1267         }
1268         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1269                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1270                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1271                 }
1272                 else {
1273                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1274                 }
1275         }
1276         return adv;
1277 }
1278
1279 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1280
1281 static int
1282 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1283 {
1284         u32 speed_arg = 0, pause_adv;
1285
1286         pause_adv = bnx2_phy_get_pause_adv(bp);
1287
1288         if (bp->autoneg & AUTONEG_SPEED) {
1289                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1290                 if (bp->advertising & ADVERTISED_10baseT_Half)
1291                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1292                 if (bp->advertising & ADVERTISED_10baseT_Full)
1293                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1294                 if (bp->advertising & ADVERTISED_100baseT_Half)
1295                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296                 if (bp->advertising & ADVERTISED_100baseT_Full)
1297                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1298                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1299                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1300                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1301                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1302         } else {
1303                 if (bp->req_line_speed == SPEED_2500)
1304                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1305                 else if (bp->req_line_speed == SPEED_1000)
1306                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1307                 else if (bp->req_line_speed == SPEED_100) {
1308                         if (bp->req_duplex == DUPLEX_FULL)
1309                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1310                         else
1311                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1312                 } else if (bp->req_line_speed == SPEED_10) {
1313                         if (bp->req_duplex == DUPLEX_FULL)
1314                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1315                         else
1316                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1317                 }
1318         }
1319
1320         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1321                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1322         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1323                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1324
1325         if (port == PORT_TP)
1326                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1327                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1328
1329         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1330
1331         spin_unlock_bh(&bp->phy_lock);
1332         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1333         spin_lock_bh(&bp->phy_lock);
1334
1335         return 0;
1336 }
1337
/* Configure the SerDes PHY from bp's requested settings: either force
 * speed/duplex (autoneg off) or program the advertisements and
 * restart autonegotiation.  Remote-PHY configurations are handed off
 * to bnx2_setup_remote_phy().  Called with bp->phy_lock held; the
 * lock is dropped briefly around the link-down msleep.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability requires forcing the
		 * link down so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is the
				 * BMCR_SPEED1000 bit set above --
				 * presumably the 5709 must not have it
				 * set when forcing 1G; confirm.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Only kick autoneg when the advertisement changed or autoneg
	 * was disabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1452
/* Ethtool advertised-mode mask for fibre: include 2.5G only when the
 * PHY supports it.  NB: expands to an expression that reads a local
 * variable named "bp".
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* Ethtool advertised-mode mask for copper: all 10/100/1000 modes. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks covering all 10/100 modes and all
 * 1000 modes respectively.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1467
/* Load default link settings from the remote-PHY shared-memory area
 * (copper or SerDes block depending on bp->phy_port) and translate
 * them into bp->autoneg / bp->advertising / bp->req_* fields.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: build the advertising mask bit by bit. */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode.  The checks run from lowest to highest
		 * speed, so the fastest flag present wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1514
1515 static void
1516 bnx2_set_default_link(struct bnx2 *bp)
1517 {
1518         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1519                 return bnx2_set_default_remote_link(bp);
1520
1521         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1522         bp->req_line_speed = 0;
1523         if (bp->phy_flags & PHY_SERDES_FLAG) {
1524                 u32 reg;
1525
1526                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1527
1528                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1529                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1530                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1531                         bp->autoneg = 0;
1532                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1533                         bp->req_duplex = DUPLEX_FULL;
1534                 }
1535         } else
1536                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1537 }
1538
/* Bump the driver pulse sequence number and write it to the shared
 * memory pulse mailbox so firmware knows the driver is alive.  The
 * indirect_lock keeps the register-window address/data write pair
 * from being interleaved with other indirect accesses.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1552
/* Handle a link event from the remote-PHY firmware: decode the
 * LINK_STATUS word from shared memory into bp->link_up, line speed,
 * duplex, flow control and PHY port, then reprogram the MAC and
 * report any link change.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware wants a fresh driver pulse when it flags expiry. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case intentionally falls through into the
		 * matching FULL case after overriding the duplex.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		/* Resolve flow control: use the requested setting
		 * unless both speed and flow control were
		 * autonegotiated, in which case take firmware's result.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Refresh defaults if firmware switched the PHY port. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1631
1632 static int
1633 bnx2_set_remote_link(struct bnx2 *bp)
1634 {
1635         u32 evt_code;
1636
1637         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1638         switch (evt_code) {
1639                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1640                         bnx2_remote_phy_event(bp);
1641                         break;
1642                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1643                 default:
1644                         bnx2_send_heart_beat(bp);
1645                         break;
1646         }
1647         return 0;
1648 }
1649
/* Program the copper PHY from the software configuration in *bp.
 *
 * With AUTONEG_SPEED set, the 10/100 and 1000BASE-T advertisement
 * registers are rebuilt from bp->advertising and autoneg is
 * (re)started only if something actually changed; otherwise only the
 * MAC flow control is resolved.  Without AUTONEG_SPEED, the speed and
 * duplex in bp->req_line_speed / bp->req_duplex are forced via BMCR.
 *
 * Caller must hold bp->phy_lock; the lock is dropped briefly around
 * the 50 ms forced-link-down delay.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, keeping only the speed and
		 * pause bits for comparison below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Rebuild the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop the PHY lock while sleeping. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1746
1747 static int
1748 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1749 {
1750         if (bp->loopback == MAC_LOOPBACK)
1751                 return 0;
1752
1753         if (bp->phy_flags & PHY_SERDES_FLAG) {
1754                 return (bnx2_setup_serdes_phy(bp, port));
1755         }
1756         else {
1757                 return (bnx2_setup_copper_phy(bp));
1758         }
1759 }
1760
/* One-time initialization of the 5709 SerDes PHY.
 *
 * The standard MII registers are shadowed on this PHY, so the mii_*
 * offsets are redirected first; the PHY is then reset and the SerDes
 * digital, over-1G, BAM next-page and Clause 73 blocks are programmed
 * through the block-address register.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers live at a +0x10 offset on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode and turn off media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the hardware is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Clause 73 BAM autoneg configuration. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected for normal operation. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1809
/* One-time initialization of the 5708 SerDes PHY.
 *
 * Resets the PHY, enables fiber mode with auto-detect and PLL early
 * link detect, optionally advertises 2.5G, and applies chip-rev and
 * backplane-specific TX amplitude tweaks from NVRAM config.  Always
 * returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the hardware supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a TX control value from NVRAM on backplane designs. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1867
/* One-time initialization of the 5706 SerDes PHY.
 *
 * Resets the PHY and programs the extended-packet-length setting based
 * on the current MTU.  Registers 0x18/0x1c are vendor shadow/expansion
 * registers; the values written here match the vendor driver —
 * NOTE(review): their bit-level meaning is not documented in this file.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		/* NOTE(review): 0x300 taken as-is from the vendor driver. */
        	REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1904
/* One-time initialization of the copper PHY.
 *
 * Resets the PHY, applies the CRC-fix and early-DAC workarounds when
 * the corresponding phy_flags are set, programs extended packet length
 * according to the MTU, and enables ethernet@wirespeed.  Registers
 * 0x10/0x15/0x17/0x18 are vendor shadow/expansion registers; values
 * match the vendor driver.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround sequence via the expansion registers. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clear bit 8 of DSP expand reg 8). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1955
1956
1957 static int
1958 bnx2_init_phy(struct bnx2 *bp)
1959 {
1960         u32 val;
1961         int rc = 0;
1962
1963         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1964         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1965
1966         bp->mii_bmcr = MII_BMCR;
1967         bp->mii_bmsr = MII_BMSR;
1968         bp->mii_bmsr1 = MII_BMSR;
1969         bp->mii_adv = MII_ADVERTISE;
1970         bp->mii_lpa = MII_LPA;
1971
1972         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1973
1974         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1975                 goto setup_phy;
1976
1977         bnx2_read_phy(bp, MII_PHYSID1, &val);
1978         bp->phy_id = val << 16;
1979         bnx2_read_phy(bp, MII_PHYSID2, &val);
1980         bp->phy_id |= val & 0xffff;
1981
1982         if (bp->phy_flags & PHY_SERDES_FLAG) {
1983                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1984                         rc = bnx2_init_5706s_phy(bp);
1985                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1986                         rc = bnx2_init_5708s_phy(bp);
1987                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1988                         rc = bnx2_init_5709s_phy(bp);
1989         }
1990         else {
1991                 rc = bnx2_init_copper_phy(bp);
1992         }
1993
1994 setup_phy:
1995         if (!rc)
1996                 rc = bnx2_setup_phy(bp, bp->phy_port);
1997
1998         return rc;
1999 }
2000
2001 static int
2002 bnx2_set_mac_loopback(struct bnx2 *bp)
2003 {
2004         u32 mac_mode;
2005
2006         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2007         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2008         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2009         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010         bp->link_up = 1;
2011         return 0;
2012 }
2013
2014 static int bnx2_test_link(struct bnx2 *);
2015
2016 static int
2017 bnx2_set_phy_loopback(struct bnx2 *bp)
2018 {
2019         u32 mac_mode;
2020         int rc, i;
2021
2022         spin_lock_bh(&bp->phy_lock);
2023         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2024                             BMCR_SPEED1000);
2025         spin_unlock_bh(&bp->phy_lock);
2026         if (rc)
2027                 return rc;
2028
2029         for (i = 0; i < 10; i++) {
2030                 if (bnx2_test_link(bp) == 0)
2031                         break;
2032                 msleep(100);
2033         }
2034
2035         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2036         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2037                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2038                       BNX2_EMAC_MODE_25G_MODE);
2039
2040         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2041         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2042         bp->link_up = 1;
2043         return 0;
2044 }
2045
/* Post @msg_data to the firmware through the driver mailbox and,
 * unless the message is a no-wait type (BNX2_DRV_MSG_DATA_WAIT0),
 * poll up to FW_ACK_TIME_OUT_MS for the firmware to echo the sequence
 * number back.
 *
 * Returns 0 on success or for no-wait messages, -EBUSY on ack timeout
 * (the timeout is also reported back to the firmware), -EIO if the
 * firmware acked with a bad status.  @silent suppresses the timeout
 * printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number so the ack can
	 * be matched.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* Fire-and-forget messages skip the ack/status checks. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2088
/* Initialize the 5709 host-based context memory.
 *
 * Kicks off the on-chip context memory init and waits for it to
 * complete, then programs the host page table with the DMA address of
 * each pre-allocated context page, polling each page-table write for
 * acceptance.  Returns 0 on success or -EBUSY if the hardware does not
 * respond in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit 12 is written as in the vendor driver; its
	 * meaning is not documented here.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the page size relative to the 256-byte minimum. */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Program one page-table entry per context page. */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for the write request to be consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2131
/* Zero out the on-chip context memory for all 96 CIDs on chips that
 * keep context on-chip (accessed through the CTX_VIRT_ADDR /
 * CTX_PAGE_TBL window).
 *
 * On the 5706 A0, virtual CIDs with bit 3 set map to different
 * physical CIDs (remapped into the 0x60+ range), so the page table is
 * programmed with the remapped physical address while the virtual
 * address is used for the actual writes.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* 5706 A0 virtual->physical CID remap quirk. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans CTX_SIZE/PHY_CTX_SIZE windows. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2174
/* Work around defective on-chip RX mbuf memory blocks.
 *
 * Allocates every free on-chip rx mbuf from the firmware, records the
 * good ones (bit 9 of the returned address clear), then frees only the
 * good ones back to the pool.  The bad blocks are deliberately never
 * returned, so the chip will not hand them out again.  Returns 0 on
 * success or -ENOMEM if the temporary tracking array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* NOTE(review): free-command encoding (address duplicated
		 * at bit 9 plus bit 0) matches the vendor driver.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2225
2226 static void
2227 bnx2_set_mac_addr(struct bnx2 *bp)
2228 {
2229         u32 val;
2230         u8 *mac_addr = bp->dev->dev_addr;
2231
2232         val = (mac_addr[0] << 8) | mac_addr[1];
2233
2234         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2235
2236         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2237                 (mac_addr[4] << 8) | mac_addr[5];
2238
2239         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2240 }
2241
2242 static inline int
2243 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2244 {
2245         dma_addr_t mapping;
2246         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2247         struct rx_bd *rxbd =
2248                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2249         struct page *page = alloc_page(GFP_ATOMIC);
2250
2251         if (!page)
2252                 return -ENOMEM;
2253         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2254                                PCI_DMA_FROMDEVICE);
2255         rx_pg->page = page;
2256         pci_unmap_addr_set(rx_pg, mapping, mapping);
2257         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2258         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2259         return 0;
2260 }
2261
2262 static void
2263 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2264 {
2265         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2266         struct page *page = rx_pg->page;
2267
2268         if (!page)
2269                 return;
2270
2271         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2272                        PCI_DMA_FROMDEVICE);
2273
2274         __free_page(page);
2275         rx_pg->page = NULL;
2276 }
2277
2278 static inline int
2279 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2280 {
2281         struct sk_buff *skb;
2282         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2283         dma_addr_t mapping;
2284         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2285         unsigned long align;
2286
2287         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2288         if (skb == NULL) {
2289                 return -ENOMEM;
2290         }
2291
2292         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2293                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2294
2295         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2296                 PCI_DMA_FROMDEVICE);
2297
2298         rx_buf->skb = skb;
2299         pci_unmap_addr_set(rx_buf, mapping, mapping);
2300
2301         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2302         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2303
2304         bp->rx_prod_bseq += bp->rx_buf_use_size;
2305
2306         return 0;
2307 }
2308
2309 static int
2310 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2311 {
2312         struct status_block *sblk = bnapi->status_blk;
2313         u32 new_link_state, old_link_state;
2314         int is_set = 1;
2315
2316         new_link_state = sblk->status_attn_bits & event;
2317         old_link_state = sblk->status_attn_bits_ack & event;
2318         if (new_link_state != old_link_state) {
2319                 if (new_link_state)
2320                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2321                 else
2322                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2323         } else
2324                 is_set = 0;
2325
2326         return is_set;
2327 }
2328
2329 static void
2330 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2331 {
2332         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2333                 spin_lock(&bp->phy_lock);
2334                 bnx2_set_link(bp);
2335                 spin_unlock(&bp->phy_lock);
2336         }
2337         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2338                 bnx2_set_remote_link(bp);
2339
2340 }
2341
2342 static inline u16
2343 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2344 {
2345         u16 cons;
2346
2347         cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2348
2349         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2350                 cons++;
2351         return cons;
2352 }
2353
/* Reclaim tx buffers that the hardware has finished transmitting.
 *
 * Walks the tx ring from bp->tx_cons up to the hardware consumer
 * index, unmapping and freeing each skb (head BD plus one BD per page
 * fragment).  TSO packets may be only partially completed by the
 * hardware, so a GSO skb is reclaimed only once its last BD is at or
 * behind the hardware consumer.  Wakes the tx queue when enough
 * descriptors have been freed.
 */
static void
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the skipped end-of-page slot. */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD is still pending. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD that follows the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read: more BDs may have completed meanwhile. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bp->hw_tx_cons = hw_cons;
	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to close the race with
		 * bnx2_start_xmit() stopping the queue.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2433
/* Recycle @count rx page BDs from the page consumer back onto the
 * page producer ring without allocating fresh pages.
 *
 * If @skb is non-NULL, its last page fragment is detached, re-mapped
 * for DMA and installed at the current consumer slot before the skb is
 * freed (replacing the page the caller consumed from that slot).  For
 * every slot where producer and consumer differ, the page, its DMA
 * mapping and the BD address are moved from the consumer entry to the
 * producer entry.  Updates bp->rx_pg_prod / bp->rx_pg_cons.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bp->rx_pg_prod, prod;
	u16 cons = bp->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Replace the consumed page with the skb's last frag. */
		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		/* Move page + mapping + BD address cons -> prod. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bp->rx_pg_prod = hw_prod;
	bp->rx_pg_cons = cons;
}
2482
/* Recycle an rx skb from the consumer slot @cons back into the producer
 * slot @prod so the hardware can fill it again, giving the buffer back
 * to the device (DMA sync) and advancing the producer byte sequence.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Only the header area was synced for the CPU; give it back. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	/* bseq advances even when cons == prod; the BD is re-posted. */
	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	/* Different slots: carry the DMA mapping and the hardware
	 * address over to the producer BD.
	 */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2512
/* Finish assembling a received skb.  For a normal frame the data is
 * already in the skb's linear area.  For a split/jumbo frame (@hdr_len
 * non-zero) the first hdr_len bytes are linear and the rest is gathered
 * from the rx page ring as page fragments.  @len excludes nothing yet:
 * the on-wire frame carries a trailing 4-byte CRC that is trimmed here.
 * ring_idx packs (cons << 16) | prod.  Returns 0 or a -errno from the
 * replacement-buffer allocation.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Allocate a replacement buffer first; on failure recycle the
	 * old skb (and any page BDs) and drop the packet.
	 */
	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* raw_len includes the 4-byte frame CRC. */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole frame fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bp->rx_pg_cons;
		u16 pg_prod = bp->rx_pg_prod;

		/* Bytes still to gather from pages, CRC included. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The remaining bytes are all (or mostly)
				 * CRC; trim the tail already copied into
				 * the skb and recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* The last page holds the 4-byte CRC; drop it. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Refill the producer slot; on failure give the
			 * partially built skb's pages back and drop.
			 */
			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bp->rx_pg_prod = pg_prod;
		bp->rx_pg_cons = pg_cons;
	}
	return 0;
}
2601
2602 static inline u16
2603 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2604 {
2605         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2606
2607         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2608                 cons++;
2609         return cons;
2610 }
2611
/* NAPI rx handler: process up to @budget completed rx descriptors,
 * hand the resulting skbs to the stack, and tell the chip the new
 * consumer/producer positions.  Returns the number of packets handled.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area; enough to inspect the
		 * l2_fhdr and to copy small packets.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr before the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Bad frame: recycle the buffer and move on. */
			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}
		/* Non-zero hdr_len means the rest of the frame lives in
		 * the rx page ring (header/data split or jumbo frame).
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4-byte frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and keep
			 * the original rx buffer on the ring.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
				    (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless they are VLAN tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results when rx_csum is on. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new ring positions. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bp->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	/* Order the mailbox writes before any following MMIO. */
	mmiowb();

	return rx_pkt;

}
2756
2757 /* MSI ISR - The only difference between this and the INTx ISR
2758  * is that the MSI interrupt is always serviced.
2759  */
/* MSI interrupt handler: mask further chip interrupts and hand the
 * work to NAPI.  No "ours?" check is needed because MSI is never
 * shared.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);
	/* Mask interrupts until the NAPI poll re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2780
/* One-shot MSI handler: the hardware disarms the interrupt by itself,
 * so unlike bnx2_msi() no explicit mask write is needed before
 * scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2798
/* Legacy INTx interrupt handler.  The line may be shared, so first
 * decide whether this interrupt is ours before masking and scheduling
 * NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only if NAPI was not already
	 * scheduled, so a concurrent poll is not disturbed.
	 */
	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2838
2839 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2840                                  STATUS_ATTN_BITS_TIMER_ABORT)
2841
2842 static inline int
2843 bnx2_has_work(struct bnx2_napi *bnapi)
2844 {
2845         struct bnx2 *bp = bnapi->bp;
2846         struct status_block *sblk = bp->status_blk;
2847
2848         if ((bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons) ||
2849             (bnx2_get_hw_tx_cons(bnapi) != bp->hw_tx_cons))
2850                 return 1;
2851
2852         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2853             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2854                 return 1;
2855
2856         return 0;
2857 }
2858
/* One pass of NAPI work: service attention events (link changes),
 * reap tx completions, then process rx packets within the remaining
 * budget.  Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back flushes the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bnx2_get_hw_tx_cons(bnapi) != bp->hw_tx_cons)
		bnx2_tx_int(bp, bnapi);

	if (bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
2887
/* NAPI poll callback: loop doing work until either the budget is
 * exhausted (stay scheduled) or no work remains, in which case polling
 * is completed and chip interrupts are re-enabled.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & USING_MSI_FLAG)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with MASK_INT first, then a second
			 * write to actually re-enable the interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
2929
2930 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2931  * from set_multicast.
2932  */
/* Program the chip's rx filtering (promiscuous / all-multi / multicast
 * hash and VLAN tag keeping) from dev->flags and the multicast list.
 * Serialized against PHY accesses by phy_lock.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in hardware unless VLAN accel or ASF needs
	 * them stripped.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash with ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 8 CRC bits select one of 256 hash bits:
			 * bits 7:5 pick the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort-user rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3004
3005 static void
3006 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3007         u32 rv2p_proc)
3008 {
3009         int i;
3010         u32 val;
3011
3012
3013         for (i = 0; i < rv2p_code_len; i += 8) {
3014                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
3015                 rv2p_code++;
3016                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
3017                 rv2p_code++;
3018
3019                 if (rv2p_proc == RV2P_PROC1) {
3020                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3021                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3022                 }
3023                 else {
3024                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3025                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3026                 }
3027         }
3028
3029         /* Reset the processor, un-stall is done later. */
3030         if (rv2p_proc == RV2P_PROC1) {
3031                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3032         }
3033         else {
3034                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3035         }
3036 }
3037
3038 static int
3039 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3040 {
3041         u32 offset;
3042         u32 val;
3043         int rc;
3044
3045         /* Halt the CPU. */
3046         val = REG_RD_IND(bp, cpu_reg->mode);
3047         val |= cpu_reg->mode_value_halt;
3048         REG_WR_IND(bp, cpu_reg->mode, val);
3049         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3050
3051         /* Load the Text area. */
3052         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3053         if (fw->gz_text) {
3054                 int j;
3055
3056                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3057                                        fw->gz_text_len);
3058                 if (rc < 0)
3059                         return rc;
3060
3061                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3062                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3063                 }
3064         }
3065
3066         /* Load the Data area. */
3067         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3068         if (fw->data) {
3069                 int j;
3070
3071                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3072                         REG_WR_IND(bp, offset, fw->data[j]);
3073                 }
3074         }
3075
3076         /* Load the SBSS area. */
3077         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3078         if (fw->sbss_len) {
3079                 int j;
3080
3081                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3082                         REG_WR_IND(bp, offset, 0);
3083                 }
3084         }
3085
3086         /* Load the BSS area. */
3087         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3088         if (fw->bss_len) {
3089                 int j;
3090
3091                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3092                         REG_WR_IND(bp, offset, 0);
3093                 }
3094         }
3095
3096         /* Load the Read-Only area. */
3097         offset = cpu_reg->spad_base +
3098                 (fw->rodata_addr - cpu_reg->mips_view_base);
3099         if (fw->rodata) {
3100                 int j;
3101
3102                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3103                         REG_WR_IND(bp, offset, fw->rodata[j]);
3104                 }
3105         }
3106
3107         /* Clear the pre-fetch instruction. */
3108         REG_WR_IND(bp, cpu_reg->inst, 0);
3109         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3110
3111         /* Start the CPU. */
3112         val = REG_RD_IND(bp, cpu_reg->mode);
3113         val &= ~cpu_reg->mode_value_halt;
3114         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3115         REG_WR_IND(bp, cpu_reg->mode, val);
3116
3117         return 0;
3118 }
3119
/* Initialize all on-chip processors: decompress and download the two
 * RV2P images, then load firmware into the RXP, TXP, TPAT, COM and CP
 * CPUs.  5709 (Xinan) chips use their own firmware images.  The shared
 * decompression buffer is freed on all paths.  Returns 0 or -errno.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* fw->text points at the shared decompression buffer. */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);

init_cpu_err:
	vfree(text);
	return rc;
}
3280
/* Move the chip between PCI power states.
 *
 * PCI_D0: clear any pending PME status, ack magic/ACPI packets received
 * while suspended, and turn off magic-packet detection and ACPI pattern
 * matching.  PCI_D3hot: optionally arm Wake-on-LAN (force the copper PHY
 * to WOL-capable 10/100 speeds, enable magic-packet mode, accept all
 * multicast), notify the firmware, then program PMCSR for D3hot.
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set the power state bits to D0 and clear PME status
		 * (write-one-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any magic/ACPI packet received while suspended and
		 * stop magic-packet detection now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force autoneg to 10/100 on copper so
			 * the link comes up at a WOL-compatible speed; the
			 * user settings are restored below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort-user0 to accept broadcast and
			 * multicast, then enable it.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware whether we are suspending with WOL. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* On 5706 A0/A1 the D3hot state bits are only set
			 * when WOL is enabled; otherwise the chip is left
			 * in D0.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3417
3418 static int
3419 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3420 {
3421         u32 val;
3422         int j;
3423
3424         /* Request access to the flash interface. */
3425         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3426         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3427                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3428                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3429                         break;
3430
3431                 udelay(5);
3432         }
3433
3434         if (j >= NVRAM_TIMEOUT_COUNT)
3435                 return -EBUSY;
3436
3437         return 0;
3438 }
3439
3440 static int
3441 bnx2_release_nvram_lock(struct bnx2 *bp)
3442 {
3443         int j;
3444         u32 val;
3445
3446         /* Relinquish nvram interface. */
3447         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3448
3449         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3450                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3451                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3452                         break;
3453
3454                 udelay(5);
3455         }
3456
3457         if (j >= NVRAM_TIMEOUT_COUNT)
3458                 return -EBUSY;
3459
3460         return 0;
3461 }
3462
3463
3464 static int
3465 bnx2_enable_nvram_write(struct bnx2 *bp)
3466 {
3467         u32 val;
3468
3469         val = REG_RD(bp, BNX2_MISC_CFG);
3470         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3471
3472         if (bp->flash_info->flags & BNX2_NV_WREN) {
3473                 int j;
3474
3475                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3476                 REG_WR(bp, BNX2_NVM_COMMAND,
3477                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3478
3479                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3480                         udelay(5);
3481
3482                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3483                         if (val & BNX2_NVM_COMMAND_DONE)
3484                                 break;
3485                 }
3486
3487                 if (j >= NVRAM_TIMEOUT_COUNT)
3488                         return -EBUSY;
3489         }
3490         return 0;
3491 }
3492
3493 static void
3494 bnx2_disable_nvram_write(struct bnx2 *bp)
3495 {
3496         u32 val;
3497
3498         val = REG_RD(bp, BNX2_MISC_CFG);
3499         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3500 }
3501
3502
3503 static void
3504 bnx2_enable_nvram_access(struct bnx2 *bp)
3505 {
3506         u32 val;
3507
3508         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3509         /* Enable both bits, even on read. */
3510         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3511                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3512 }
3513
3514 static void
3515 bnx2_disable_nvram_access(struct bnx2 *bp)
3516 {
3517         u32 val;
3518
3519         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3520         /* Disable both bits, even after read. */
3521         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3522                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3523                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3524 }
3525
3526 static int
3527 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3528 {
3529         u32 cmd;
3530         int j;
3531
3532         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3533                 /* Buffered flash, no erase needed */
3534                 return 0;
3535
3536         /* Build an erase command */
3537         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3538               BNX2_NVM_COMMAND_DOIT;
3539
3540         /* Need to clear DONE bit separately. */
3541         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3542
3543         /* Address of the NVRAM to read from. */
3544         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3545
3546         /* Issue an erase command. */
3547         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3548
3549         /* Wait for completion. */
3550         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3551                 u32 val;
3552
3553                 udelay(5);
3554
3555                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3556                 if (val & BNX2_NVM_COMMAND_DONE)
3557                         break;
3558         }
3559
3560         if (j >= NVRAM_TIMEOUT_COUNT)
3561                 return -EBUSY;
3562
3563         return 0;
3564 }
3565
3566 static int
3567 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3568 {
3569         u32 cmd;
3570         int j;
3571
3572         /* Build the command word. */
3573         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3574
3575         /* Calculate an offset of a buffered flash, not needed for 5709. */
3576         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3577                 offset = ((offset / bp->flash_info->page_size) <<
3578                            bp->flash_info->page_bits) +
3579                           (offset % bp->flash_info->page_size);
3580         }
3581
3582         /* Need to clear DONE bit separately. */
3583         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3584
3585         /* Address of the NVRAM to read from. */
3586         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3587
3588         /* Issue a read command. */
3589         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3590
3591         /* Wait for completion. */
3592         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3593                 u32 val;
3594
3595                 udelay(5);
3596
3597                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3598                 if (val & BNX2_NVM_COMMAND_DONE) {
3599                         val = REG_RD(bp, BNX2_NVM_READ);
3600
3601                         val = be32_to_cpu(val);
3602                         memcpy(ret_val, &val, 4);
3603                         break;
3604                 }
3605         }
3606         if (j >= NVRAM_TIMEOUT_COUNT)
3607                 return -EBUSY;
3608
3609         return 0;
3610 }
3611
3612
3613 static int
3614 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3615 {
3616         u32 cmd, val32;
3617         int j;
3618
3619         /* Build the command word. */
3620         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3621
3622         /* Calculate an offset of a buffered flash, not needed for 5709. */
3623         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3624                 offset = ((offset / bp->flash_info->page_size) <<
3625                           bp->flash_info->page_bits) +
3626                          (offset % bp->flash_info->page_size);
3627         }
3628
3629         /* Need to clear DONE bit separately. */
3630         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3631
3632         memcpy(&val32, val, 4);
3633         val32 = cpu_to_be32(val32);
3634
3635         /* Write the data. */
3636         REG_WR(bp, BNX2_NVM_WRITE, val32);
3637
3638         /* Address of the NVRAM to write to. */
3639         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3640
3641         /* Issue the write command. */
3642         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3643
3644         /* Wait for completion. */
3645         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3646                 udelay(5);
3647
3648                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3649                         break;
3650         }
3651         if (j >= NVRAM_TIMEOUT_COUNT)
3652                 return -EBUSY;
3653
3654         return 0;
3655 }
3656
/* Identify the attached flash/EEPROM device, record it in
 * bp->flash_info, and determine bp->flash_size.  If the NVM interface
 * strapping has not been applied yet, the interface is reconfigured
 * from the matching flash_table entry.  Returns 0 on success, -ENODEV
 * if no table entry matches, or an NVRAM-lock error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has one known flash type; skip the table scan entirely. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j == entry_count means neither scan above found a match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in the shared firmware config;
	 * fall back to the table entry's total size if it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3739
3740 static int
3741 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3742                 int buf_size)
3743 {
3744         int rc = 0;
3745         u32 cmd_flags, offset32, len32, extra;
3746
3747         if (buf_size == 0)
3748                 return 0;
3749
3750         /* Request access to the flash interface. */
3751         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3752                 return rc;
3753
3754         /* Enable access to flash interface */
3755         bnx2_enable_nvram_access(bp);
3756
3757         len32 = buf_size;
3758         offset32 = offset;
3759         extra = 0;
3760
3761         cmd_flags = 0;
3762
3763         if (offset32 & 3) {
3764                 u8 buf[4];
3765                 u32 pre_len;
3766
3767                 offset32 &= ~3;
3768                 pre_len = 4 - (offset & 3);
3769
3770                 if (pre_len >= len32) {
3771                         pre_len = len32;
3772                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3773                                     BNX2_NVM_COMMAND_LAST;
3774                 }
3775                 else {
3776                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3777                 }
3778
3779                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3780
3781                 if (rc)
3782                         return rc;
3783
3784                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3785
3786                 offset32 += 4;
3787                 ret_buf += pre_len;
3788                 len32 -= pre_len;
3789         }
3790         if (len32 & 3) {
3791                 extra = 4 - (len32 & 3);
3792                 len32 = (len32 + 4) & ~3;
3793         }
3794
3795         if (len32 == 4) {
3796                 u8 buf[4];
3797
3798                 if (cmd_flags)
3799                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3800                 else
3801                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3802                                     BNX2_NVM_COMMAND_LAST;
3803
3804                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3805
3806                 memcpy(ret_buf, buf, 4 - extra);
3807         }
3808         else if (len32 > 0) {
3809                 u8 buf[4];
3810
3811                 /* Read the first word. */
3812                 if (cmd_flags)
3813                         cmd_flags = 0;
3814                 else
3815                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3816
3817                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3818
3819                 /* Advance to the next dword. */
3820                 offset32 += 4;
3821                 ret_buf += 4;
3822                 len32 -= 4;
3823
3824                 while (len32 > 4 && rc == 0) {
3825                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3826
3827                         /* Advance to the next dword. */
3828                         offset32 += 4;
3829                         ret_buf += 4;
3830                         len32 -= 4;
3831                 }
3832
3833                 if (rc)
3834                         return rc;
3835
3836                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3837                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3838
3839                 memcpy(ret_buf, buf, 4 - extra);
3840         }
3841
3842         /* Disable access to flash interface */
3843         bnx2_disable_nvram_access(bp);
3844
3845         bnx2_release_nvram_lock(bp);
3846
3847         return rc;
3848 }
3849
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned starts/lengths are handled by reading the boundary dwords
 * first and merging the new data into an allocated aligned copy.  For
 * non-buffered flash each touched page is read into a scratch buffer,
 * erased, and rewritten (old data outside the target range, new data
 * inside it).  The NVRAM lock is acquired and released per page.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range to a dword boundary and read
	 * the existing leading dword so its bytes can be preserved.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: likewise read the existing trailing dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned copy: old boundary bytes plus the new data. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch buffer to hold one page. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST on the final dword of the page, or of the
			 * data itself when writing buffered flash. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4029
4030 static void
4031 bnx2_init_remote_phy(struct bnx2 *bp)
4032 {
4033         u32 val;
4034
4035         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4036         if (!(bp->phy_flags & PHY_SERDES_FLAG))
4037                 return;
4038
4039         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4040         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4041                 return;
4042
4043         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4044                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4045
4046                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4047                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4048                         bp->phy_port = PORT_FIBRE;
4049                 else
4050                         bp->phy_port = PORT_TP;
4051
4052                 if (netif_running(bp->dev)) {
4053                         u32 sig;
4054
4055                         if (val & BNX2_LINK_STATUS_LINK_UP) {
4056                                 bp->link_up = 1;
4057                                 netif_carrier_on(bp->dev);
4058                         } else {
4059                                 bp->link_up = 0;
4060                                 netif_carrier_off(bp->dev);
4061                         }
4062                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4063                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4064                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4065                                    sig);
4066                 }
4067         }
4068 }
4069
4070 static int
4071 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4072 {
4073         u32 val;
4074         int i, rc = 0;
4075         u8 old_port;
4076
4077         /* Wait for the current PCI transaction to complete before
4078          * issuing a reset. */
4079         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4080                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4081                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4082                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4083                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4084         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4085         udelay(5);
4086
4087         /* Wait for the firmware to tell us it is ok to issue a reset. */
4088         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4089
4090         /* Deposit a driver reset signature so the firmware knows that
4091          * this is a soft reset. */
4092         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4093                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
4094
4095         /* Do a dummy read to force the chip to complete all current transaction
4096          * before we issue a reset. */
4097         val = REG_RD(bp, BNX2_MISC_ID);
4098
4099         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4100                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4101                 REG_RD(bp, BNX2_MISC_COMMAND);
4102                 udelay(5);
4103
4104                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4105                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4106
4107                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4108
4109         } else {
4110                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4111                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4112                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4113
4114                 /* Chip reset. */
4115                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4116
4117                 /* Reading back any register after chip reset will hang the
4118                  * bus on 5706 A0 and A1.  The msleep below provides plenty
4119                  * of margin for write posting.
4120                  */
4121                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4122                     (CHIP_ID(bp) == CHIP_ID_5706_A1))
4123                         msleep(20);
4124
4125                 /* Reset takes approximate 30 usec */
4126                 for (i = 0; i < 10; i++) {
4127                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4128                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4129                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4130                                 break;
4131                         udelay(10);
4132                 }
4133
4134                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4135                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4136                         printk(KERN_ERR PFX "Chip reset did not complete\n");
4137                         return -EBUSY;
4138                 }
4139         }
4140
4141         /* Make sure byte swapping is properly configured. */
4142         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4143         if (val != 0x01020304) {
4144                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4145                 return -ENODEV;
4146         }
4147
4148         /* Wait for the firmware to finish its initialization. */
4149         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4150         if (rc)
4151                 return rc;
4152
4153         spin_lock_bh(&bp->phy_lock);
4154         old_port = bp->phy_port;
4155         bnx2_init_remote_phy(bp);
4156         if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4157                 bnx2_set_default_remote_link(bp);
4158         spin_unlock_bh(&bp->phy_lock);
4159
4160         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4161                 /* Adjust the voltage regular to two steps lower.  The default
4162                  * of this register is 0x0000000e. */
4163                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4164
4165                 /* Remove bad rbuf memory from the free pool. */
4166                 rc = bnx2_alloc_bad_rbuf(bp);
4167         }
4168
4169         return rc;
4170 }
4171
/* Program the chip after a reset: DMA and byte-swap configuration,
 * context/CPU initialization, MAC address, MTU, host coalescing
 * parameters and the status/statistics block DMA addresses.  Finally
 * enables all chip blocks and notifies the firmware.  Returns 0 or a
 * negative error code from context/CPU init or the firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* Extra DMA config bits -- values per the hardware manual. */
	val |= (0x2 << 20) | (1 << 11);

	/* One more DMA config bit when running PCI-X at 133 MHz. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* Ping-pong DMA only on non-A0 5706 when not on PCI-X. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 only: force a single TDMA channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the relaxed-ordering enable bit. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Bring up the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* MQ: 256-byte kernel bypass block size; 5709 A0/A1 also need
	 * the HALT_DIS bit set. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox window sized to cover all kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* RV2P page size encoding. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff algorithm from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* Reset the cached status block index used by NAPI polling. */
	bp->bnx2_napi.last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the *_int variant in the
	 * high 16 bits and the normal value in the low 16 bits. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	/* Stats ticks are forced to 0 (disabled) on the 5708. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 gets stats collection only; other chips also enable
	 * RX/TX timer mode. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* 5709: enable DMA via the new core control register. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; read back to flush write posting. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for later COAL_NOW triggers. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4349
/* Write the L2 TX context for @cid: context type, command type and the
 * host DMA address of the TX BD chain.  The 5709 uses a different set
 * of context field offsets (the _XI variants) than earlier chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	/* Select the per-chip context field offsets. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	/* L2 command type; the (8 << 16) field's meaning is not visible
	 * here -- per hardware context layout, TODO confirm. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* High and low halves of the TX descriptor ring DMA address. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
4378
/* Initialize software TX ring state and the TX context for TX_CID.
 * The last BD in the ring is not a packet descriptor: it holds the DMA
 * address of the ring base, chaining the ring back onto itself.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Threshold (half the ring) used when deciding to wake the TX
	 * queue -- see the TX completion path. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Point the final BD back at the base of the ring. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset producer/consumer indices and the byte sequence count. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Cache the mailbox addresses used to ring the TX doorbell. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
4403
4404 static void
4405 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4406                      int num_rings)
4407 {
4408         int i;
4409         struct rx_bd *rxbd;
4410
4411         for (i = 0; i < num_rings; i++) {
4412                 int j;
4413
4414                 rxbd = &rx_ring[i][0];
4415                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4416                         rxbd->rx_bd_len = buf_size;
4417                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4418                 }
4419                 if (i == (num_rings - 1))
4420                         j = 0;
4421                 else
4422                         j = i + 1;
4423                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4424                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4425         }
4426 }
4427
/* Initialize the RX BD ring(s) and, when a page ring is configured
 * (jumbo MTU), the RX page ring; program the L2 RX context and fill
 * both rings with receive buffers.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

	/* Reset software producer/consumer state. */
	bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->rx_prod_bseq = 0;
	bp->rx_pg_prod = 0;
	bp->rx_pg_cons = 0;

	/* Chain the normal RX BD pages together. */
	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Page ring in use: chain its BD pages and tell the chip
		 * the header buffer and page sizes. */
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		/* DMA address of the first page-ring BD page. */
		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* RX context type and size. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	/* DMA address of the first normal RX BD page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the page ring; stop early if allocation fails. */
	ring_prod = prod = bp->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bp->rx_pg_prod = prod;

	/* Fill the normal ring with skbs; stop early on failure. */
	ring_prod = prod = bp->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4499
4500 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4501 {
4502         u32 max, num_rings = 1;
4503
4504         while (ring_size > MAX_RX_DESC_CNT) {
4505                 ring_size -= MAX_RX_DESC_CNT;
4506                 num_rings++;
4507         }
4508         /* round to next power of 2 */
4509         max = max_size;
4510         while ((max & num_rings) == 0)
4511                 max >>= 1;
4512
4513         if (num_rings != max)
4514                 max <<= 1;
4515
4516         return max;
4517 }
4518
4519 static void
4520 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4521 {
4522         u32 rx_size, rx_space, jumbo_size;
4523
4524         /* 8 for CRC and VLAN */
4525         rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4526
4527         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4528                 sizeof(struct skb_shared_info);
4529
4530         bp->rx_copy_thresh = RX_COPY_THRESH;
4531         bp->rx_pg_ring_size = 0;
4532         bp->rx_max_pg_ring = 0;
4533         bp->rx_max_pg_ring_idx = 0;
4534         if (rx_space > PAGE_SIZE) {
4535                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4536
4537                 jumbo_size = size * pages;
4538                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4539                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4540
4541                 bp->rx_pg_ring_size = jumbo_size;
4542                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4543                                                         MAX_RX_PG_RINGS);
4544                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4545                 rx_size = RX_COPY_THRESH + bp->rx_offset;
4546                 bp->rx_copy_thresh = 0;
4547         }
4548
4549         bp->rx_buf_use_size = rx_size;
4550         /* hw alignment */
4551         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4552         bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4553         bp->rx_ring_size = size;
4554         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4555         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4556 }
4557
/* Release every skb still attached to the TX ring, unmapping the DMA
 * mappings of the head and all fragment BDs first.  Called from
 * bnx2_free_skbs() when the rings are being torn down.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Only the first BD of a packet carries the skb pointer. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear part of the skb... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* ...then each paged fragment, which occupies the
		 * following ring slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head BD and all of its fragment BDs. */
		i += j + 1;
	}

}
4594
/* Release all skbs attached to the RX ring (unmapping their DMA
 * mappings first) and all pages attached to the RX page ring.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		/* Empty slots (never filled or already freed) are skipped. */
		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
	/* The page ring (used for jumbo MTU) has its own free helper. */
	for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
		bnx2_free_rx_page(bp, i);
}
4620
/* Free all buffers attached to the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4627
4628 static int
4629 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4630 {
4631         int rc;
4632
4633         rc = bnx2_reset_chip(bp, reset_code);
4634         bnx2_free_skbs(bp);
4635         if (rc)
4636                 return rc;
4637
4638         if ((rc = bnx2_init_chip(bp)) != 0)
4639                 return rc;
4640
4641         bnx2_init_tx_ring(bp);
4642         bnx2_init_rx_ring(bp);
4643         return 0;
4644 }
4645
4646 static int
4647 bnx2_init_nic(struct bnx2 *bp)
4648 {
4649         int rc;
4650
4651         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4652                 return rc;
4653
4654         spin_lock_bh(&bp->phy_lock);
4655         bnx2_init_phy(bp);
4656         bnx2_set_link(bp);
4657         spin_unlock_bh(&bp->phy_lock);
4658         return 0;
4659 }
4660
/* Ethtool register self-test.  For each register in reg_tbl, write 0
 * and then all-ones, checking that read/write bits take the written
 * value and read-only bits keep their original value.  The original
 * register contents are restored in every case.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on the 5709.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: register address; rw_mask: bits that must accept
	 * writes; ro_mask: bits that must be unaffected by writes. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Table terminator. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Remember the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write 0: r/w bits must read back 0, r/o bits must
		 * keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: r/w bits must read back 1, r/o bits
		 * must still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4831
4832 static int
4833 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4834 {
4835         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4836                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4837         int i;
4838
4839         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4840                 u32 offset;
4841
4842                 for (offset = 0; offset < size; offset += 4) {
4843
4844                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4845
4846                         if (REG_RD_IND(bp, start + offset) !=
4847                                 test_pattern[i]) {
4848                                 return -ENODEV;
4849                         }
4850                 }
4851         }
4852         return 0;
4853 }
4854
4855 static int
4856 bnx2_test_memory(struct bnx2 *bp)
4857 {
4858         int ret = 0;
4859         int i;
4860         static struct mem_entry {
4861                 u32   offset;
4862                 u32   len;
4863         } mem_tbl_5706[] = {
4864                 { 0x60000,  0x4000 },
4865                 { 0xa0000,  0x3000 },
4866                 { 0xe0000,  0x4000 },
4867                 { 0x120000, 0x4000 },
4868                 { 0x1a0000, 0x4000 },
4869                 { 0x160000, 0x4000 },
4870                 { 0xffffffff, 0    },
4871         },
4872         mem_tbl_5709[] = {
4873                 { 0x60000,  0x4000 },
4874                 { 0xa0000,  0x3000 },
4875                 { 0xe0000,  0x4000 },
4876                 { 0x120000, 0x4000 },
4877                 { 0x1a0000, 0x4000 },
4878                 { 0xffffffff, 0    },
4879         };
4880         struct mem_entry *mem_tbl;
4881
4882         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4883                 mem_tbl = mem_tbl_5709;
4884         else
4885                 mem_tbl = mem_tbl_5706;
4886
4887         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4888                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4889                         mem_tbl[i].len)) != 0) {
4890                         return ret;
4891                 }
4892         }
4893
4894         return ret;
4895 }
4896
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Self-test helper: transmit a single test frame with the chip in MAC
 * or PHY loopback and verify it is received back unmodified.
 *
 * Returns 0 on pass, -EINVAL for an unknown loopback_mode, -ENOMEM if
 * the test skb cannot be allocated, and -ENODEV when the frame does
 * not come back intact.  PHY loopback is skipped (reported as pass)
 * when the PHY is managed by remote firmware.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Largest frame that still fits in a single rx buffer. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Test pattern: our own MAC as destination, 8 zero bytes, then
	 * an incrementing byte pattern checked on the receive side.
	 */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (coalesce now, no interrupt) so
	 * the rx consumer index sampled below is current.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the doorbell: producer index plus byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back. */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The tx side must have consumed our one descriptor ... */
	if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have arrived on rx. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written l2 frame header precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: hardware pkt_len includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5021
5022 #define BNX2_MAC_LOOPBACK_FAILED        1
5023 #define BNX2_PHY_LOOPBACK_FAILED        2
5024 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5025                                          BNX2_PHY_LOOPBACK_FAILED)
5026
5027 static int
5028 bnx2_test_loopback(struct bnx2 *bp)
5029 {
5030         int rc = 0;
5031
5032         if (!netif_running(bp->dev))
5033                 return BNX2_LOOPBACK_FAILED;
5034
5035         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5036         spin_lock_bh(&bp->phy_lock);
5037         bnx2_init_phy(bp);
5038         spin_unlock_bh(&bp->phy_lock);
5039         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5040                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5041         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5042                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5043         return rc;
5044 }
5045
5046 #define NVRAM_SIZE 0x200
5047 #define CRC32_RESIDUAL 0xdebb20e3
5048
5049 static int
5050 bnx2_test_nvram(struct bnx2 *bp)
5051 {
5052         u32 buf[NVRAM_SIZE / 4];
5053         u8 *data = (u8 *) buf;
5054         int rc = 0;
5055         u32 magic, csum;
5056
5057         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5058                 goto test_nvram_done;
5059
5060         magic = be32_to_cpu(buf[0]);
5061         if (magic != 0x669955aa) {
5062                 rc = -ENODEV;
5063                 goto test_nvram_done;
5064         }
5065
5066         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5067                 goto test_nvram_done;
5068
5069         csum = ether_crc_le(0x100, data);
5070         if (csum != CRC32_RESIDUAL) {
5071                 rc = -ENODEV;
5072                 goto test_nvram_done;
5073         }
5074
5075         csum = ether_crc_le(0x100, data + 0x100);
5076         if (csum != CRC32_RESIDUAL) {
5077                 rc = -ENODEV;
5078         }
5079
5080 test_nvram_done:
5081         return rc;
5082 }
5083
5084 static int
5085 bnx2_test_link(struct bnx2 *bp)
5086 {
5087         u32 bmsr;
5088
5089         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5090                 if (bp->link_up)
5091                         return 0;
5092                 return -ENODEV;
5093         }
5094         spin_lock_bh(&bp->phy_lock);
5095         bnx2_enable_bmsr1(bp);
5096         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5097         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5098         bnx2_disable_bmsr1(bp);
5099         spin_unlock_bh(&bp->phy_lock);
5100
5101         if (bmsr & BMSR_LSTATUS) {
5102                 return 0;
5103         }
5104         return -ENODEV;
5105 }
5106
5107 static int
5108 bnx2_test_intr(struct bnx2 *bp)
5109 {
5110         int i;
5111         u16 status_idx;
5112
5113         if (!netif_running(bp->dev))
5114                 return -ENODEV;
5115
5116         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5117
5118         /* This register is not touched during run-time. */
5119         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5120         REG_RD(bp, BNX2_HC_COMMAND);
5121
5122         for (i = 0; i < 10; i++) {
5123                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5124                         status_idx) {
5125
5126                         break;
5127                 }
5128
5129                 msleep_interruptible(10);
5130         }
5131         if (i < 10)
5132                 return 0;
5133
5134         return -ENODEV;
5135 }
5136
/* Periodic link maintenance for the 5706 SerDes PHY, called from
 * bnx2_timer().  Implements parallel detection: if autoneg is enabled
 * but the link has not come up and the partner is not sending config
 * words, force 1Gb full duplex; once a parallel-detected partner
 * starts sending config words again, re-enable autoneg.  Runs under
 * bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* NOTE(review): regs 0x1c/0x17/0x15 look like
			 * vendor shadow/expansion register accesses; the
			 * bit meanings below come from the inline
			 * comments — confirm against the PHY datasheet.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1Gb full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link was forced via parallel detect; if the partner
		 * now sends config words, return to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5191
/* Periodic link maintenance for the 5708 SerDes PHY, called from
 * bnx2_timer().  No-op when the PHY is firmware-managed or the port
 * is not 2.5G capable.  While autoneg is enabled but the link stays
 * down, alternate between forcing 2.5G and restoring autoneg so a
 * non-negotiating 2.5G partner can still link up.  Runs under
 * bp->phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out the grace period set below. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg is not converging: try forced 2.5G
			 * for SERDES_FORCED_TIMEOUT ticks.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too: go back to autoneg
			 * and give it two timer ticks before retrying.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5224
/* Per-device heartbeat timer: pings the firmware, refreshes the
 * firmware rx-drop counter, applies a 5708 statistics workaround and
 * runs the SerDes link state machines.  Re-arms itself every
 * bp->current_interval jiffies while the interface is up.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep the timer alive) while interrupts
	 * are blocked around a reset (intr_sem raised).
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	/* Firmware rx drop count is fetched via indirect register
	 * access and folded into the stats block.
	 */
	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5255
5256 static int
5257 bnx2_request_irq(struct bnx2 *bp)
5258 {
5259         struct net_device *dev = bp->dev;
5260         unsigned long flags;
5261         struct bnx2_irq *irq = &bp->irq_tbl[0];
5262         int rc;
5263
5264         if (bp->flags & USING_MSI_FLAG)
5265                 flags = 0;
5266         else
5267                 flags = IRQF_SHARED;
5268         rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
5269         return rc;
5270 }
5271
5272 static void
5273 bnx2_free_irq(struct bnx2 *bp)
5274 {
5275         struct net_device *dev = bp->dev;
5276
5277         free_irq(bp->irq_tbl[0].vector, dev);
5278         if (bp->flags & USING_MSI_FLAG) {
5279                 pci_disable_msi(bp->pdev);
5280                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5281         }
5282 }
5283
5284 static void
5285 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5286 {
5287         bp->irq_tbl[0].handler = bnx2_interrupt;
5288         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5289
5290         if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
5291                 if (pci_enable_msi(bp->pdev) == 0) {
5292                         bp->flags |= USING_MSI_FLAG;
5293                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5294                                 bp->flags |= ONE_SHOT_MSI_FLAG;
5295                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5296                         } else
5297                                 bp->irq_tbl[0].handler = bnx2_msi;
5298                 }
5299         }
5300
5301         bp->irq_tbl[0].vector = bp->pdev->irq;
5302 }
5303
/* Called with rtnl_lock */
/* net_device open hook: allocate ring/status memory, pick the
 * interrupt mode (MSI when available, falling back to INTx if the MSI
 * self-test fails), initialize the chip and start the tx queue.
 * Returns 0 on success or a negative errno, unwinding everything
 * allocated so far on each failure path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Ring and status-block memory must exist before the irq
	 * handler can be installed.
	 */
	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Re-select interrupt mode with MSI forced off,
			 * then bring the nic back up on the INTx vector.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5385
/* Work-queue handler behind bnx2_tx_timeout(): quiesce the netif,
 * re-initialize the chip and restart.  bp->in_reset_task is polled by
 * bnx2_close() so the device is not torn down mid-reset.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is raised to 1 right before restart —
	 * presumably bnx2_netif_start() drops it when re-enabling
	 * interrupts; confirm against its definition.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5403
5404 static void
5405 bnx2_tx_timeout(struct net_device *dev)
5406 {
5407         struct bnx2 *bp = netdev_priv(dev);
5408
5409         /* This allows the netif to be shutdown gracefully before resetting */
5410         schedule_work(&bp->reset_task);
5411 }
5412
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* 8021q hook: store the VLAN group pointer and reprogram the chip rx
 * mode, with the device quiesced for the duration of the change.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5428
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* net_device hard_start_xmit: map the skb head and all page frags,
 * fill one tx_bd per piece (checksum/VLAN/LSO state encoded in the bd
 * flags), then ring the producer doorbell registers.  Returns
 * NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring could
	 * fill; reaching this path indicates broken flow control.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	/* LSO frame: hand segmentation to the chip. */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TCPv6: encode the TCP header offset beyond
			 * the base ipv6 header into the bd flags and
			 * the upper mss bits.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TCPv4: pre-compute the per-segment tot_len and
			 * the pseudo-header checksum.  The headers are
			 * written to, so a cloned header must be copied
			 * first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Extra header length (ip options + tcp options)
			 * in 32-bit words, carried in bits 8+ of the flags.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map and post the linear part as the first (START) bd; only
	 * this bd records the skb pointer for completion.
	 */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One bd per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Last bd of the frame gets the END flag. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Kick the chip: producer index plus running byte count. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; re-check availability to
	 * close the race with a concurrent bnx2_tx_int() completion.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5567
/* Called with rtnl_lock */
/* net_device stop hook: wait out any in-flight reset task, quiesce
 * interrupts/NAPI/timer, tell the firmware how to behave while the
 * driver is down (based on the WoL configuration), then release all
 * resources and drop to D3hot.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Select the firmware unload/suspend code from the WoL setup. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5600
/* Read a hardware counter made of _hi/_lo 32-bit halves.  On 64-bit
 * builds (BITS_PER_LONG == 64) the halves are combined; 32-bit builds
 * use only the low half.  The expansions are fully parenthesized so
 * the macros compose safely inside larger expressions (the previous
 * GET_NET_STATS64 ended in an unparenthesized `+`).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5613
5614 static struct net_device_stats *
5615 bnx2_get_stats(struct net_device *dev)
5616 {
5617         struct bnx2 *bp = netdev_priv(dev);
5618         struct statistics_block *stats_blk = bp->stats_blk;
5619         struct net_device_stats *net_stats = &bp->net_stats;
5620
5621         if (bp->stats_blk == NULL) {
5622                 return net_stats;
5623         }
5624         net_stats->rx_packets =
5625                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5626                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5627                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5628
5629         net_stats->tx_packets =
5630                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5631                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5632                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5633
5634         net_stats->rx_bytes =
5635                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5636
5637         net_stats->tx_bytes =
5638                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5639
5640         net_stats->multicast =
5641                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5642
5643         net_stats->collisions =
5644                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5645
5646         net_stats->rx_length_errors =
5647                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5648                 stats_blk->stat_EtherStatsOverrsizePkts);
5649
5650         net_stats->rx_over_errors =
5651                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5652
5653         net_stats->rx_frame_errors =
5654                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5655
5656         net_stats->rx_crc_errors =
5657                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5658
5659         net_stats->rx_errors = net_stats->rx_length_errors +
5660                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5661                 net_stats->rx_crc_errors;
5662
5663         net_stats->tx_aborted_errors =
5664                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5665                 stats_blk->stat_Dot3StatsLateCollisions);
5666
5667         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5668             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5669                 net_stats->tx_carrier_errors = 0;
5670         else {
5671                 net_stats->tx_carrier_errors =
5672                         (unsigned long)
5673                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
5674         }
5675
5676         net_stats->tx_errors =
5677                 (unsigned long)
5678                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5679                 +
5680                 net_stats->tx_aborted_errors +
5681                 net_stats->tx_carrier_errors;
5682
5683         net_stats->rx_missed_errors =
5684                 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5685                 stats_blk->stat_FwRxDrop);
5686
5687         return net_stats;
5688 }
5689
5690 /* All ethtool functions called with rtnl_lock */
5691
5692 static int
5693 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5694 {
5695         struct bnx2 *bp = netdev_priv(dev);
5696         int support_serdes = 0, support_copper = 0;
5697
5698         cmd->supported = SUPPORTED_Autoneg;
5699         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5700                 support_serdes = 1;
5701                 support_copper = 1;
5702         } else if (bp->phy_port == PORT_FIBRE)
5703                 support_serdes = 1;
5704         else
5705                 support_copper = 1;
5706
5707         if (support_serdes) {
5708                 cmd->supported |= SUPPORTED_1000baseT_Full |
5709                         SUPPORTED_FIBRE;
5710                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5711                         cmd->supported |= SUPPORTED_2500baseX_Full;
5712
5713         }
5714         if (support_copper) {
5715                 cmd->supported |= SUPPORTED_10baseT_Half |
5716                         SUPPORTED_10baseT_Full |
5717                         SUPPORTED_100baseT_Half |
5718                         SUPPORTED_100baseT_Full |
5719                         SUPPORTED_1000baseT_Full |
5720                         SUPPORTED_TP;
5721
5722         }
5723
5724         spin_lock_bh(&bp->phy_lock);
5725         cmd->port = bp->phy_port;
5726         cmd->advertising = bp->advertising;
5727
5728         if (bp->autoneg & AUTONEG_SPEED) {
5729                 cmd->autoneg = AUTONEG_ENABLE;
5730         }
5731         else {
5732                 cmd->autoneg = AUTONEG_DISABLE;
5733         }
5734
5735         if (netif_carrier_ok(dev)) {
5736                 cmd->speed = bp->line_speed;
5737                 cmd->duplex = bp->duplex;
5738         }
5739         else {
5740                 cmd->speed = -1;
5741                 cmd->duplex = -1;
5742         }
5743         spin_unlock_bh(&bp->phy_lock);
5744
5745         cmd->transceiver = XCVR_INTERNAL;
5746         cmd->phy_address = bp->phy_addr;
5747
5748         return 0;
5749 }
5750
/* ethtool set_settings: validate the requested autoneg/speed/duplex/
 * port combination, commit it to the bp fields and reprogram the PHY.
 * All checks and the final bnx2_setup_phy() run under bp->phy_lock.
 * Returns 0 on success or -EINVAL for an unsupported combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the port type is only allowed with remote-PHY fw. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 is invalid on a fibre port. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G advert needs a capable fibre port; note
			 * `advertising` is intentionally left unchanged
			 * here (matches existing behavior).
			 */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise all speeds the port
			 * type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex (autoneg off). */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		/* Forcing gigabit speeds on copper is not supported. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit and apply the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5833
5834 static void
5835 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5836 {
5837         struct bnx2 *bp = netdev_priv(dev);
5838
5839         strcpy(info->driver, DRV_MODULE_NAME);
5840         strcpy(info->version, DRV_MODULE_VERSION);
5841         strcpy(info->bus_info, pci_name(bp->pdev));
5842         strcpy(info->fw_version, bp->fw_version);
5843 }
5844
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len handler: report the fixed dump buffer size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
5852
5853 static void
5854 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5855 {
5856         u32 *p = _p, i, offset;
5857         u8 *orig_p = _p;
5858         struct bnx2 *bp = netdev_priv(dev);
5859         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5860                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5861                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5862                                  0x1040, 0x1048, 0x1080, 0x10a4,
5863                                  0x1400, 0x1490, 0x1498, 0x14f0,
5864                                  0x1500, 0x155c, 0x1580, 0x15dc,
5865                                  0x1600, 0x1658, 0x1680, 0x16d8,
5866                                  0x1800, 0x1820, 0x1840, 0x1854,
5867                                  0x1880, 0x1894, 0x1900, 0x1984,
5868                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5869                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5870                                  0x2000, 0x2030, 0x23c0, 0x2400,
5871                                  0x2800, 0x2820, 0x2830, 0x2850,
5872                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5873                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5874                                  0x4080, 0x4090, 0x43c0, 0x4458,
5875                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5876                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5877                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5878                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5879                                  0x6800, 0x6848, 0x684c, 0x6860,
5880                                  0x6888, 0x6910, 0x8000 };
5881
5882         regs->version = 0;
5883
5884         memset(p, 0, BNX2_REGDUMP_LEN);
5885
5886         if (!netif_running(bp->dev))
5887                 return;
5888
5889         i = 0;
5890         offset = reg_boundaries[0];
5891         p += offset;
5892         while (offset < BNX2_REGDUMP_LEN) {
5893                 *p++ = REG_RD(bp, offset);
5894                 offset += 4;
5895                 if (offset == reg_boundaries[i + 1]) {
5896                         offset = reg_boundaries[i + 2];
5897                         p = (u32 *) (orig_p + offset);
5898                         i += 2;
5899                 }
5900         }
5901 }
5902
5903 static void
5904 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5905 {
5906         struct bnx2 *bp = netdev_priv(dev);
5907
5908         if (bp->flags & NO_WOL_FLAG) {
5909                 wol->supported = 0;
5910                 wol->wolopts = 0;
5911         }
5912         else {
5913                 wol->supported = WAKE_MAGIC;
5914                 if (bp->wol)
5915                         wol->wolopts = WAKE_MAGIC;
5916                 else
5917                         wol->wolopts = 0;
5918         }
5919         memset(&wol->sopass, 0, sizeof(wol->sopass));
5920 }
5921
5922 static int
5923 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5924 {
5925         struct bnx2 *bp = netdev_priv(dev);
5926
5927         if (wol->wolopts & ~WAKE_MAGIC)
5928                 return -EINVAL;
5929
5930         if (wol->wolopts & WAKE_MAGIC) {
5931                 if (bp->flags & NO_WOL_FLAG)
5932                         return -EINVAL;
5933
5934                 bp->wol = 1;
5935         }
5936         else {
5937                 bp->wol = 0;
5938         }
5939         return 0;
5940 }
5941
/* ethtool nway_reset handler: restart link autonegotiation.
 * Returns 0 on success, -EINVAL if autonegotiation is not enabled,
 * or the result of the remote-PHY setup call.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        /* Restarting autonegotiation only makes sense when it is on. */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote-PHY capable devices: redo link setup through the
         * management firmware path instead of writing MII registers.
         */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; msleep() may not be
                 * called with a spinlock held.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the SerDes autonegotiation timeout so the timer
                 * routine can fall back if the link does not come up.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation cycle. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
5984
5985 static int
5986 bnx2_get_eeprom_len(struct net_device *dev)
5987 {
5988         struct bnx2 *bp = netdev_priv(dev);
5989
5990         if (bp->flash_info == NULL)
5991                 return 0;
5992
5993         return (int) bp->flash_size;
5994 }
5995
5996 static int
5997 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5998                 u8 *eebuf)
5999 {
6000         struct bnx2 *bp = netdev_priv(dev);
6001         int rc;
6002
6003         /* parameters already validated in ethtool_get_eeprom */
6004
6005         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6006
6007         return rc;
6008 }
6009
6010 static int
6011 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6012                 u8 *eebuf)
6013 {
6014         struct bnx2 *bp = netdev_priv(dev);
6015         int rc;
6016
6017         /* parameters already validated in ethtool_set_eeprom */
6018
6019         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6020
6021         return rc;
6022 }
6023
6024 static int
6025 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6026 {
6027         struct bnx2 *bp = netdev_priv(dev);
6028
6029         memset(coal, 0, sizeof(struct ethtool_coalesce));
6030
6031         coal->rx_coalesce_usecs = bp->rx_ticks;
6032         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6033         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6034         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6035
6036         coal->tx_coalesce_usecs = bp->tx_ticks;
6037         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6038         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6039         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6040
6041         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6042
6043         return 0;
6044 }
6045
6046 static int
6047 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6048 {
6049         struct bnx2 *bp = netdev_priv(dev);
6050
6051         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6052         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6053
6054         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6055         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6056
6057         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6058         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6059
6060         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6061         if (bp->rx_quick_cons_trip_int > 0xff)
6062                 bp->rx_quick_cons_trip_int = 0xff;
6063