net: convert print_mac to %pM
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50 #include <linux/log2.h>
51
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54 #include "bnx2_fw2.h"
55
56 #define FW_BUF_SIZE             0x10000
57
58 #define DRV_MODULE_NAME         "bnx2"
59 #define PFX DRV_MODULE_NAME     ": "
60 #define DRV_MODULE_VERSION      "1.8.1"
61 #define DRV_MODULE_RELDATE      "Oct 7, 2008"
62
63 #define RUN_AT(x) (jiffies + (x))
64
65 /* Time in jiffies before concluding the transmitter is hung. */
66 #define TX_TIMEOUT  (5*HZ)
67
/* Banner printed once at driver load/probe. */
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: disable_msi=1 forces legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Board index: used both to index board_info[] below and as the
 * driver_data value in the PCI device table.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
} board_t;
93
/* indexed by board_t, above -- entry order must match the enum */
static struct {
	char *name;	/* human-readable board name */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	};
109
/* Supported PCI IDs.  driver_data is the board_t index into
 * board_info[].  The HP OEM entries (specific subsystem IDs) are
 * listed before the generic PCI_ANY_ID entries so they match first.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ 0, }
};
133
/* NVRAM device table.  Each entry's leading words are hardware
 * configuration values; presumably matched against the detected flash
 * strapping by the NVRAM init code elsewhere in this file -- confirm
 * against bnx2_init_nvram().
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
222
/* Fixed NVRAM spec for 5709-family chips (no strap-based table lookup). */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
231
232 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
233
234 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
235 {
236         u32 diff;
237
238         smp_mb();
239
240         /* The ring uses 256 indices for 255 entries, one of them
241          * needs to be skipped.
242          */
243         diff = txr->tx_prod - txr->tx_cons;
244         if (unlikely(diff >= TX_DESC_CNT)) {
245                 diff &= 0xffff;
246                 if (diff == TX_DESC_CNT)
247                         diff = MAX_TX_DESC_CNT;
248         }
249         return (bp->tx_ring_size - diff);
250 }
251
/* Indirectly read the device register at @offset through the PCI
 * config window.  indirect_lock serializes the two-step
 * window-address / window-data sequence against concurrent users.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
263
/* Indirectly write @val to the device register at @offset through the
 * PCI config window; counterpart of bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
272
/* Write a 32-bit word into the firmware shared memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
278
279 static u32
280 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
281 {
282         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
283 }
284
/* Write @val into context memory at @cid_addr + @offset.
 *
 * On the 5709 the context is accessed through the CTX_CTX_DATA/CTRL
 * register pair and the WRITE_REQ bit must be polled until the chip
 * completes the write; older chips use a simple address/data pair.
 * indirect_lock keeps the multi-register sequence atomic.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll up to 5 x 5us for WRITE_REQ to self-clear. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
308
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * If the chip is auto-polling the PHY, polling is suspended for the
 * duration of the access and restored afterwards.  Returns 0 on
 * success or -EBUSY (with *@val zeroed) if the MDIO transaction does
 * not complete within the poll budget.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Start the read: PHY address, register number, START_BUSY. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to self-clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Re-enable hardware auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	return ret;
}
365
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * access.  Returns 0 on success or -EBUSY if the transaction does not
 * complete within the poll budget.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Start the write: PHY address, register, data, START_BUSY. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to self-clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Re-enable hardware auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	return ret;
}
414
415 static void
416 bnx2_disable_int(struct bnx2 *bp)
417 {
418         int i;
419         struct bnx2_napi *bnapi;
420
421         for (i = 0; i < bp->irq_nvecs; i++) {
422                 bnapi = &bp->bnx2_napi[i];
423                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
424                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
425         }
426         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
427 }
428
/* Unmask interrupts on all vectors.
 *
 * Per vector, the first write acknowledges the last seen status index
 * while keeping interrupts masked; the second repeats the ack with the
 * mask bit cleared.  The final COAL_NOW command presumably forces the
 * host coalescing block to fire immediately if events are already
 * pending -- confirm against the chip documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
449
/* Disable interrupts and wait for in-flight handlers to finish.
 * intr_sem is bumped first so bnx2_netif_start() will not re-enable
 * the device until the matching decrement.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
460
461 static void
462 bnx2_napi_disable(struct bnx2 *bp)
463 {
464         int i;
465
466         for (i = 0; i < bp->irq_nvecs; i++)
467                 napi_disable(&bp->bnx2_napi[i].napi);
468 }
469
470 static void
471 bnx2_napi_enable(struct bnx2 *bp)
472 {
473         int i;
474
475         for (i = 0; i < bp->irq_nvecs; i++)
476                 napi_enable(&bp->bnx2_napi[i].napi);
477 }
478
/* Stop all device activity: interrupts, NAPI polling and TX queues.
 * trans_start is refreshed so the stopped queues do not trigger a
 * spurious TX watchdog timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
489
490 static void
491 bnx2_netif_start(struct bnx2 *bp)
492 {
493         if (atomic_dec_and_test(&bp->intr_sem)) {
494                 if (netif_running(bp->dev)) {
495                         netif_tx_wake_all_queues(bp->dev);
496                         bnx2_napi_enable(bp);
497                         bnx2_enable_int(bp);
498                 }
499         }
500 }
501
502 static void
503 bnx2_free_tx_mem(struct bnx2 *bp)
504 {
505         int i;
506
507         for (i = 0; i < bp->num_tx_rings; i++) {
508                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
509                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
510
511                 if (txr->tx_desc_ring) {
512                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
513                                             txr->tx_desc_ring,
514                                             txr->tx_desc_mapping);
515                         txr->tx_desc_ring = NULL;
516                 }
517                 kfree(txr->tx_buf_ring);
518                 txr->tx_buf_ring = NULL;
519         }
520 }
521
522 static void
523 bnx2_free_rx_mem(struct bnx2 *bp)
524 {
525         int i;
526
527         for (i = 0; i < bp->num_rx_rings; i++) {
528                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
529                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
530                 int j;
531
532                 for (j = 0; j < bp->rx_max_ring; j++) {
533                         if (rxr->rx_desc_ring[j])
534                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
535                                                     rxr->rx_desc_ring[j],
536                                                     rxr->rx_desc_mapping[j]);
537                         rxr->rx_desc_ring[j] = NULL;
538                 }
539                 if (rxr->rx_buf_ring)
540                         vfree(rxr->rx_buf_ring);
541                 rxr->rx_buf_ring = NULL;
542
543                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544                         if (rxr->rx_pg_desc_ring[j])
545                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546                                                     rxr->rx_pg_desc_ring[i],
547                                                     rxr->rx_pg_desc_mapping[i]);
548                         rxr->rx_pg_desc_ring[i] = NULL;
549                 }
550                 if (rxr->rx_pg_ring)
551                         vfree(rxr->rx_pg_ring);
552                 rxr->rx_pg_ring = NULL;
553         }
554 }
555
556 static int
557 bnx2_alloc_tx_mem(struct bnx2 *bp)
558 {
559         int i;
560
561         for (i = 0; i < bp->num_tx_rings; i++) {
562                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
563                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
564
565                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
566                 if (txr->tx_buf_ring == NULL)
567                         return -ENOMEM;
568
569                 txr->tx_desc_ring =
570                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
571                                              &txr->tx_desc_mapping);
572                 if (txr->tx_desc_ring == NULL)
573                         return -ENOMEM;
574         }
575         return 0;
576 }
577
/* Allocate, for every RX ring, the software shadow rings (vmalloc)
 * and the coherent DMA descriptor rings.  Returns 0 or -ENOMEM; on
 * failure the caller cleans up via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		/* The page shadow ring is only needed when page rings
		 * are configured...
		 */
		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		/* ...while this loop runs unconditionally -- presumably
		 * rx_max_pg_ring is 0 when page rings are unused; confirm.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
626
/* Free all device memory: TX/RX rings, 5709 context pages, and the
 * combined status + statistics block.  Safe to call on a partially
 * allocated device (used as the error path of bnx2_alloc_mem()).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		/* Status and statistics share one allocation; freeing
		 * the status block invalidates stats_blk too.
		 */
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
652
/* Allocate all device memory: combined status + statistics block,
 * 5709 host context pages, and the RX/TX rings.  On any failure,
 * everything already allocated is released via bnx2_free_mem() and
 * -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block... */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* ...remaining MSI-X vectors each get an aligned slice
		 * of the same allocation.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 keeps 0x2000 bytes of context in host memory. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
729
/* Report the current link state to the bootcode through the shared
 * memory BNX2_LINK_STATUS word.  Skipped entirely when the PHY is
 * remotely managed (firmware owns the link in that case).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read twice -- presumably because BMSR link bits
			 * are latched per the MII spec, so the second read
			 * reflects current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
788
789 static char *
790 bnx2_xceiver_str(struct bnx2 *bp)
791 {
792         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
793                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
794                  "Copper"));
795 }
796
/* Log the link state, update the carrier flag, and propagate the
 * state to the firmware via bnx2_report_fw_link().  The unleveled
 * printk() calls continue the KERN_INFO line started above them.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
833
/* Resolve the flow control (pause) configuration after a link change
 * and store the result in bp->flow_ctrl.
 *
 * - If speed and flow control are not both autonegotiated, the
 *   requested settings are applied as-is (full duplex links only).
 * - Pause is never enabled on half duplex links.
 * - 5708 SerDes reports the resolved pause result directly in
 *   hardware; all other PHYs are resolved from the local/partner
 *   advertisement registers per IEEE 802.3 Annex 28B.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* The 5708 SerDes PHY reports the negotiated pause
		 * result directly in its 1000X status register. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Map the 1000Base-X pause advertisement bits onto the
		 * copper ADVERTISE_PAUSE_* bits so the resolution logic
		 * below can be shared. */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
909
910 static int
911 bnx2_5709s_linkup(struct bnx2 *bp)
912 {
913         u32 val, speed;
914
915         bp->link_up = 1;
916
917         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
918         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
919         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
920
921         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
922                 bp->line_speed = bp->req_line_speed;
923                 bp->duplex = bp->req_duplex;
924                 return 0;
925         }
926         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
927         switch (speed) {
928                 case MII_BNX2_GP_TOP_AN_SPEED_10:
929                         bp->line_speed = SPEED_10;
930                         break;
931                 case MII_BNX2_GP_TOP_AN_SPEED_100:
932                         bp->line_speed = SPEED_100;
933                         break;
934                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
935                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
936                         bp->line_speed = SPEED_1000;
937                         break;
938                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
939                         bp->line_speed = SPEED_2500;
940                         break;
941         }
942         if (val & MII_BNX2_GP_TOP_AN_FD)
943                 bp->duplex = DUPLEX_FULL;
944         else
945                 bp->duplex = DUPLEX_HALF;
946         return 0;
947 }
948
949 static int
950 bnx2_5708s_linkup(struct bnx2 *bp)
951 {
952         u32 val;
953
954         bp->link_up = 1;
955         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
956         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
957                 case BCM5708S_1000X_STAT1_SPEED_10:
958                         bp->line_speed = SPEED_10;
959                         break;
960                 case BCM5708S_1000X_STAT1_SPEED_100:
961                         bp->line_speed = SPEED_100;
962                         break;
963                 case BCM5708S_1000X_STAT1_SPEED_1G:
964                         bp->line_speed = SPEED_1000;
965                         break;
966                 case BCM5708S_1000X_STAT1_SPEED_2G5:
967                         bp->line_speed = SPEED_2500;
968                         break;
969         }
970         if (val & BCM5708S_1000X_STAT1_FD)
971                 bp->duplex = DUPLEX_FULL;
972         else
973                 bp->duplex = DUPLEX_HALF;
974
975         return 0;
976 }
977
978 static int
979 bnx2_5706s_linkup(struct bnx2 *bp)
980 {
981         u32 bmcr, local_adv, remote_adv, common;
982
983         bp->link_up = 1;
984         bp->line_speed = SPEED_1000;
985
986         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
987         if (bmcr & BMCR_FULLDPLX) {
988                 bp->duplex = DUPLEX_FULL;
989         }
990         else {
991                 bp->duplex = DUPLEX_HALF;
992         }
993
994         if (!(bmcr & BMCR_ANENABLE)) {
995                 return 0;
996         }
997
998         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
999         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1000
1001         common = local_adv & remote_adv;
1002         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1003
1004                 if (common & ADVERTISE_1000XFULL) {
1005                         bp->duplex = DUPLEX_FULL;
1006                 }
1007                 else {
1008                         bp->duplex = DUPLEX_HALF;
1009                 }
1010         }
1011
1012         return 0;
1013 }
1014
1015 static int
1016 bnx2_copper_linkup(struct bnx2 *bp)
1017 {
1018         u32 bmcr;
1019
1020         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1021         if (bmcr & BMCR_ANENABLE) {
1022                 u32 local_adv, remote_adv, common;
1023
1024                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1025                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1026
1027                 common = local_adv & (remote_adv >> 2);
1028                 if (common & ADVERTISE_1000FULL) {
1029                         bp->line_speed = SPEED_1000;
1030                         bp->duplex = DUPLEX_FULL;
1031                 }
1032                 else if (common & ADVERTISE_1000HALF) {
1033                         bp->line_speed = SPEED_1000;
1034                         bp->duplex = DUPLEX_HALF;
1035                 }
1036                 else {
1037                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1038                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1039
1040                         common = local_adv & remote_adv;
1041                         if (common & ADVERTISE_100FULL) {
1042                                 bp->line_speed = SPEED_100;
1043                                 bp->duplex = DUPLEX_FULL;
1044                         }
1045                         else if (common & ADVERTISE_100HALF) {
1046                                 bp->line_speed = SPEED_100;
1047                                 bp->duplex = DUPLEX_HALF;
1048                         }
1049                         else if (common & ADVERTISE_10FULL) {
1050                                 bp->line_speed = SPEED_10;
1051                                 bp->duplex = DUPLEX_FULL;
1052                         }
1053                         else if (common & ADVERTISE_10HALF) {
1054                                 bp->line_speed = SPEED_10;
1055                                 bp->duplex = DUPLEX_HALF;
1056                         }
1057                         else {
1058                                 bp->line_speed = 0;
1059                                 bp->link_up = 0;
1060                         }
1061                 }
1062         }
1063         else {
1064                 if (bmcr & BMCR_SPEED100) {
1065                         bp->line_speed = SPEED_100;
1066                 }
1067                 else {
1068                         bp->line_speed = SPEED_10;
1069                 }
1070                 if (bmcr & BMCR_FULLDPLX) {
1071                         bp->duplex = DUPLEX_FULL;
1072                 }
1073                 else {
1074                         bp->duplex = DUPLEX_HALF;
1075                 }
1076         }
1077
1078         return 0;
1079 }
1080
/* Program the L2 context type word and, on the 5709, the rx ring flow
 * control watermarks for the given context id. */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;	/* NOTE(review): magic field value; meaning not
				 * visible from this file — confirm vs. spec */

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Only use a low watermark when TX flow control (pause
		 * generation) is enabled. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Convert from ring entries to hardware watermark units. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is clamped to its 4-bit field; a zero high
		 * watermark also disables the low one. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1116
1117 static void
1118 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1119 {
1120         int i;
1121         u32 cid;
1122
1123         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1124                 if (i == 1)
1125                         cid = RX_RSS_CID;
1126                 bnx2_init_rx_context(bp, cid);
1127         }
1128 }
1129
/* Program the EMAC to match the current link state: inter-packet gap,
 * port mode, duplex, and rx/tx pause enables.  Finishes by acking the
 * link-change interrupt and, on the 5709, re-programming the rx
 * context watermarks (which depend on bp->flow_ctrl). */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Half-duplex gigabit needs longer inter-packet gap timings. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		/* Both fallthroughs below are intentional: 10M on the
		 * 5706 uses plain MII mode, and 2.5G is GMII plus the
		 * 25G mode bit. */
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1197
1198 static void
1199 bnx2_enable_bmsr1(struct bnx2 *bp)
1200 {
1201         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1202             (CHIP_NUM(bp) == CHIP_NUM_5709))
1203                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1204                                MII_BNX2_BLK_ADDR_GP_STATUS);
1205 }
1206
1207 static void
1208 bnx2_disable_bmsr1(struct bnx2 *bp)
1209 {
1210         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1211             (CHIP_NUM(bp) == CHIP_NUM_5709))
1212                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1213                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1214 }
1215
1216 static int
1217 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1218 {
1219         u32 up1;
1220         int ret = 1;
1221
1222         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1223                 return 0;
1224
1225         if (bp->autoneg & AUTONEG_SPEED)
1226                 bp->advertising |= ADVERTISED_2500baseX_Full;
1227
1228         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1229                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1230
1231         bnx2_read_phy(bp, bp->mii_up1, &up1);
1232         if (!(up1 & BCM5708S_UP1_2G5)) {
1233                 up1 |= BCM5708S_UP1_2G5;
1234                 bnx2_write_phy(bp, bp->mii_up1, up1);
1235                 ret = 0;
1236         }
1237
1238         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1239                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1240                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1241
1242         return ret;
1243 }
1244
1245 static int
1246 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1247 {
1248         u32 up1;
1249         int ret = 0;
1250
1251         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1252                 return 0;
1253
1254         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1255                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1256
1257         bnx2_read_phy(bp, bp->mii_up1, &up1);
1258         if (up1 & BCM5708S_UP1_2G5) {
1259                 up1 &= ~BCM5708S_UP1_2G5;
1260                 bnx2_write_phy(bp, bp->mii_up1, up1);
1261                 ret = 1;
1262         }
1263
1264         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1265                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1266                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1267
1268         return ret;
1269 }
1270
1271 static void
1272 bnx2_enable_forced_2g5(struct bnx2 *bp)
1273 {
1274         u32 bmcr;
1275
1276         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1277                 return;
1278
1279         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1280                 u32 val;
1281
1282                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1283                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1284                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1285                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1286                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1287                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1288
1289                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1290                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1291                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1292
1293         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1294                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1295                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1296         }
1297
1298         if (bp->autoneg & AUTONEG_SPEED) {
1299                 bmcr &= ~BMCR_ANENABLE;
1300                 if (bp->req_duplex == DUPLEX_FULL)
1301                         bmcr |= BMCR_FULLDPLX;
1302         }
1303         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1304 }
1305
1306 static void
1307 bnx2_disable_forced_2g5(struct bnx2 *bp)
1308 {
1309         u32 bmcr;
1310
1311         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1312                 return;
1313
1314         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1315                 u32 val;
1316
1317                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1318                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1319                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1320                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1321                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1322
1323                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1324                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1325                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1326
1327         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1328                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1329                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1330         }
1331
1332         if (bp->autoneg & AUTONEG_SPEED)
1333                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1334         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1335 }
1336
1337 static void
1338 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1339 {
1340         u32 val;
1341
1342         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1343         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1344         if (start)
1345                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1346         else
1347                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1348 }
1349
/* Re-evaluate the PHY link state and update driver state and the MAC
 * to match.  Reports the link change via bnx2_report_link() when the
 * up/down state actually changed.  Always returns 0. */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is always considered up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* With a firmware-managed (remote) PHY there is nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down events: read it twice so the second
	 * read reflects the current link state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes workaround: derive link state from the
		 * EMAC status and the autoneg debug shadow register
		 * instead of trusting BMSR. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* The shadow register is also read twice (latched). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the PHY-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leaving parallel-detect mode: re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1433
1434 static int
1435 bnx2_reset_phy(struct bnx2 *bp)
1436 {
1437         int i;
1438         u32 reg;
1439
1440         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1441
1442 #define PHY_RESET_MAX_WAIT 100
1443         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1444                 udelay(10);
1445
1446                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1447                 if (!(reg & BMCR_RESET)) {
1448                         udelay(20);
1449                         break;
1450                 }
1451         }
1452         if (i == PHY_RESET_MAX_WAIT) {
1453                 return -EBUSY;
1454         }
1455         return 0;
1456 }
1457
1458 static u32
1459 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1460 {
1461         u32 adv = 0;
1462
1463         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1464                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1465
1466                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1467                         adv = ADVERTISE_1000XPAUSE;
1468                 }
1469                 else {
1470                         adv = ADVERTISE_PAUSE_CAP;
1471                 }
1472         }
1473         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1474                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1475                         adv = ADVERTISE_1000XPSE_ASYM;
1476                 }
1477                 else {
1478                         adv = ADVERTISE_PAUSE_ASYM;
1479                 }
1480         }
1481         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1482                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1483                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1484                 }
1485                 else {
1486                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1487                 }
1488         }
1489         return adv;
1490 }
1491
1492 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1493
1494 static int
1495 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1496 {
1497         u32 speed_arg = 0, pause_adv;
1498
1499         pause_adv = bnx2_phy_get_pause_adv(bp);
1500
1501         if (bp->autoneg & AUTONEG_SPEED) {
1502                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1503                 if (bp->advertising & ADVERTISED_10baseT_Half)
1504                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1505                 if (bp->advertising & ADVERTISED_10baseT_Full)
1506                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1507                 if (bp->advertising & ADVERTISED_100baseT_Half)
1508                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1509                 if (bp->advertising & ADVERTISED_100baseT_Full)
1510                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1511                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1512                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1513                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1514                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1515         } else {
1516                 if (bp->req_line_speed == SPEED_2500)
1517                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1518                 else if (bp->req_line_speed == SPEED_1000)
1519                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1520                 else if (bp->req_line_speed == SPEED_100) {
1521                         if (bp->req_duplex == DUPLEX_FULL)
1522                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1523                         else
1524                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1525                 } else if (bp->req_line_speed == SPEED_10) {
1526                         if (bp->req_duplex == DUPLEX_FULL)
1527                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1528                         else
1529                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1530                 }
1531         }
1532
1533         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1534                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1535         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1536                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1537
1538         if (port == PORT_TP)
1539                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1540                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1541
1542         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1543
1544         spin_unlock_bh(&bp->phy_lock);
1545         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1546         spin_lock_bh(&bp->phy_lock);
1547
1548         return 0;
1549 }
1550
/* Configure the SerDes PHY for the requested link settings.
 *
 * Forced-speed mode: program advertisement and BMCR directly, forcing
 * a visible link-down first so the partner renegotiates.  Autoneg
 * mode: update the advertisement and restart autonegotiation, again
 * bouncing the link if the advertisement changed.
 *
 * Called with bp->phy_lock held (dropped briefly around msleep()).
 * Always returns 0. */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHYs are configured via the mailbox. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G capability requires a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve pause/MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1665
/* ethtool advertising mask for fibre ports; includes 2.5G only when the
 * PHY is 2.5G capable.  Evaluates "bp" from the enclosing scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* ethtool advertising mask covering every copper speed/duplex combo. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks (ADVERTISE_* bit encoding). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1680
/* Derive the default link settings from the shared-memory link word
 * written for remote-PHY capable devices.
 *
 * If the autoneg-enable bit is set, each speed bit in the word is
 * translated into an ethtool ADVERTISED_* flag.  Otherwise a single
 * forced speed/duplex is selected; note the checks run lowest speed
 * first, so when multiple speed bits are set the highest one wins.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
        u32 link;

        /* Copper and SerDes ports keep their link config in
         * separate shared-memory locations.
         */
        if (bp->phy_port == PORT_TP)
                link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
        else
                link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

        if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
                bp->req_line_speed = 0;
                bp->autoneg |= AUTONEG_SPEED;
                bp->advertising = ADVERTISED_Autoneg;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                        bp->advertising |= ADVERTISED_10baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
                        bp->advertising |= ADVERTISED_10baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                        bp->advertising |= ADVERTISED_100baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
                        bp->advertising |= ADVERTISED_100baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->advertising |= ADVERTISED_1000baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->advertising |= ADVERTISED_2500baseX_Full;
        } else {
                /* Forced mode: default to full duplex, drop to half
                 * only if a *HALF bit accompanies the speed bit.
                 */
                bp->autoneg = 0;
                bp->advertising = 0;
                bp->req_duplex = DUPLEX_FULL;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
                        bp->req_line_speed = SPEED_10;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
                        bp->req_line_speed = SPEED_100;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->req_line_speed = SPEED_1000;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->req_line_speed = SPEED_2500;
        }
}
1727
1728 static void
1729 bnx2_set_default_link(struct bnx2 *bp)
1730 {
1731         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1732                 bnx2_set_default_remote_link(bp);
1733                 return;
1734         }
1735
1736         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1737         bp->req_line_speed = 0;
1738         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1739                 u32 reg;
1740
1741                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1742
1743                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1744                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1745                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1746                         bp->autoneg = 0;
1747                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1748                         bp->req_duplex = DUPLEX_FULL;
1749                 }
1750         } else
1751                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1752 }
1753
/* Answer the firmware driver-pulse heartbeat.
 *
 * Increments the driver pulse sequence and writes it to the
 * BNX2_DRV_PULSE_MB mailbox in shared memory through the PCI config
 * register window.  indirect_lock serializes use of the shared
 * REG_WINDOW_ADDRESS/REG_WINDOW register pair against other indirect
 * accesses.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
        u32 msg;
        u32 addr;

        spin_lock(&bp->indirect_lock);
        /* Only the low sequence bits are significant to the firmware. */
        msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
        addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
        spin_unlock(&bp->indirect_lock);
}
1767
/* Process a link-status event reported by the remote PHY firmware.
 *
 * Decodes the BNX2_LINK_STATUS shared-memory word into link_up,
 * line_speed, duplex, flow_ctrl and phy_port, re-derives the default
 * link settings if the port type changed, reports any link
 * transition, and reprograms the MAC for the new link parameters.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* previous state, to detect changes */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        /* The firmware piggybacks its heartbeat-expired flag on the
         * link status word; answer it and strip the bit.
         */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* The *HALF cases deliberately fall through to the
                 * matching *FULL case to pick up the line speed.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        /* Flow control not fully autonegotiated: honor
                         * the requested setting on full duplex only.
                         */
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                /* A port-type change invalidates the advertising
                 * defaults; recompute them.
                 */
                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1844
1845 static int
1846 bnx2_set_remote_link(struct bnx2 *bp)
1847 {
1848         u32 evt_code;
1849
1850         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1851         switch (evt_code) {
1852                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1853                         bnx2_remote_phy_event(bp);
1854                         break;
1855                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1856                 default:
1857                         bnx2_send_heart_beat(bp);
1858                         break;
1859         }
1860         return 0;
1861 }
1862
/* Program a copper PHY for the requested link settings.
 *
 * In autoneg mode, rebuilds the MII advertisement registers from
 * bp->advertising and restarts autonegotiation only when something
 * actually changed.  In forced mode, computes a new BMCR value and,
 * if the link is currently up, bounces it so the partner sees the
 * speed change.  Must be called with phy_lock held (it is dropped
 * around the msleep).  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Current advertisement, masked down to the bits we
                 * manage, for comparison with the desired values.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Translate ethtool advertising flags to MII bits. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Restart autoneg only if the advertisement changed or
                 * autoneg is not currently enabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR link status is latched-low; read twice to get
                 * the current state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1959
1960 static int
1961 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1962 {
1963         if (bp->loopback == MAC_LOOPBACK)
1964                 return 0;
1965
1966         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1967                 return (bnx2_setup_serdes_phy(bp, port));
1968         }
1969         else {
1970                 return (bnx2_setup_copper_phy(bp));
1971         }
1972 }
1973
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709 SerDes places the IEEE-standard MII registers at an offset
 * of 0x10 within the selected register block, so the mii_* shadow
 * offsets are remapped first.  The routine then selects the AN MMD via
 * the AER block, optionally resets the PHY, forces fiber (non-
 * autodetect) mode, enables or disables 2.5G advertisement, and turns
 * on BAM/CL73 next-page support.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        /* IEEE registers live at +0x10 in the COMBO_IEEE0 block. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Route subsequent MDIO accesses to the AN MMD. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        /* Disable media auto-detect and force 1000X (fiber) mode. */
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Advertise 2.5G in the over-1G next page only if capable. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable Broadcom autoneg mode (BAM) next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address pointing at the IEEE registers. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2023
/* Initialize the 5708 SerDes PHY.
 *
 * Enables fiber mode with auto-detect, PLL early-link detect, and 2.5G
 * advertisement when capable.  Also applies two board-level tweaks:
 * a TX amplitude increase for A0/B0/B1 silicon, and a TX control
 * override for backplane designs taken from the hardware config in
 * shared memory.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        /* Fiber mode with media auto-detection enabled. */
        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Advertise 2.5G via the UP1 next page when capable. */
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Board-specific TX control value from the hardware config. */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                /* Only applied on backplane (blade) designs. */
                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2081
/* Initialize the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag, applies a GP hardware control
 * setting on 5706, and programs vendor-specific PHY registers 0x18
 * and 0x1c depending on whether jumbo frames (MTU > 1500) are in use.
 * NOTE(review): the 0x18/0x1c values are undocumented Broadcom magic;
 * presumably extended packet-length and analog control tweaks --
 * verify against the vendor programming guide before changing.
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Standard MTU: clear the extended-length settings. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2119
/* Initialize a copper PHY.
 *
 * Applies the CRC-fix DSP sequence and the early-DAC disable errata
 * workarounds when the corresponding phy_flags are set, programs the
 * extended packet length bits according to the MTU, and enables
 * ethernet@wirespeed.  NOTE(review): the raw 0x15/0x17/0x18/0x1c
 * register values are undocumented Broadcom errata magic -- do not
 * alter without the vendor programming guide.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* CRC errata workaround: DSP coefficient write sequence. */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 of DSP expand register 8 to disable early DAC. */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Standard MTU: clear the extended-length settings. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2171
2172
/* Top-level PHY initialization.
 *
 * Resets the MII register offsets to the standard Clause 22 layout
 * (the 5709 SerDes routine overrides them), enables link attention,
 * reads the 32-bit PHY ID, and dispatches to the chip-specific init
 * routine.  Remote-PHY devices skip local PHY setup entirely.  On
 * success the link parameters are then programmed via
 * bnx2_setup_phy().  Returns 0 or a negative error from a helper.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;
        int rc = 0;

        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

        /* Default Clause 22 register map; 5709 SerDes remaps these. */
        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* Remote PHY: firmware owns the PHY, nothing local to init. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                goto setup_phy;

        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp, reset_phy);
        }
        else {
                rc = bnx2_init_copper_phy(bp, reset_phy);
        }

setup_phy:
        if (!rc)
                rc = bnx2_setup_phy(bp, bp->phy_port);

        return rc;
}
2216
/* Put the EMAC into internal MAC loopback with a forced link, for
 * self-test.  Always returns 0.
 */
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
        u32 mac_mode;

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~BNX2_EMAC_MODE_PORT;       /* clear port select bits */
        mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2229
/* Forward declaration; defined later in this file. */
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback at 1G full duplex for self-test.
 *
 * After writing BMCR, polls bnx2_test_link() for up to ~1s (10 x
 * 100ms) but proceeds regardless of the result, then reprograms the
 * EMAC for GMII with loopback/force-link cleared and marks the link
 * up.  Returns 0, or the error from the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2261
/* Send a message to the bootcode firmware, optionally waiting for ack.
 *
 * @msg_data: message code; the driver sequence number is OR'd in here.
 * @ack:      when zero, post the message and return immediately.
 * @silent:   suppress the timeout printk.
 *
 * Polls the firmware mailbox (up to FW_ACK_TIME_OUT_MS) for an ack
 * matching our sequence number.  WAIT0-stage messages return success
 * without requiring a verified ack.  On timeout, notifies the
 * firmware with a FW_TIMEOUT code and returns -EBUSY; returns -EIO if
 * the firmware acked with a non-OK status, 0 otherwise.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require a verified ack. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2307
/* Initialize 5709 context memory through the host page table.
 *
 * Kicks off the self-clearing MEM_INIT command, then programs one
 * host page-table entry per pre-allocated context block (zeroing each
 * block first) and polls the WRITE_REQ bit until the hardware
 * accepts it.  Returns 0 on success, -EBUSY on a poll timeout, or
 * -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Encode the context page size and start memory init. */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* MEM_INIT self-clears when the hardware finishes. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Point page-table entry i at the block's DMA address. */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* WRITE_REQ self-clears when the entry is latched. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2355
2356 static void
2357 bnx2_init_context(struct bnx2 *bp)
2358 {
2359         u32 vcid;
2360
2361         vcid = 96;
2362         while (vcid) {
2363                 u32 vcid_addr, pcid_addr, offset;
2364                 int i;
2365
2366                 vcid--;
2367
2368                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2369                         u32 new_vcid;
2370
2371                         vcid_addr = GET_PCID_ADDR(vcid);
2372                         if (vcid & 0x8) {
2373                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2374                         }
2375                         else {
2376                                 new_vcid = vcid;
2377                         }
2378                         pcid_addr = GET_PCID_ADDR(new_vcid);
2379                 }
2380                 else {
2381                         vcid_addr = GET_CID_ADDR(vcid);
2382                         pcid_addr = vcid_addr;
2383                 }
2384
2385                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2386                         vcid_addr += (i << PHY_CTX_SHIFT);
2387                         pcid_addr += (i << PHY_CTX_SHIFT);
2388
2389                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2390                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2391
2392                         /* Zero out the context. */
2393                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2394                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2395                 }
2396         }
2397 }
2398
/* Work around bad RX buffer memory.
 *
 * Drains the hardware RX mbuf free pool, remembering the good
 * clusters (those whose allocated address has bit 9 clear), then
 * frees only the good ones back -- permanently removing the bad
 * clusters from circulation.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): good_mbuf is fixed at 512 entries while the fill loop
 * is bounded only by the hardware free count; this assumes the pool
 * never yields more than 512 good buffers -- confirm against the chip
 * documentation.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                                BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                /* Encode the cluster for the FW_BUF_FREE register. */
                val = (val << 9) | val | 1;

                bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
2450
2451 static void
2452 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2453 {
2454         u32 val;
2455
2456         val = (mac_addr[0] << 8) | mac_addr[1];
2457
2458         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2459
2460         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2461                 (mac_addr[4] << 8) | mac_addr[5];
2462
2463         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2464 }
2465
/* Allocate and DMA-map one page for RX page-ring slot @index.
 *
 * On success the page and its mapping are stored in the sw_pg entry
 * and the buffer descriptor is pointed at the DMA address.  Returns
 * 0, -ENOMEM if the page allocation fails, or -EIO if the DMA
 * mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        dma_addr_t mapping;
        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
        struct rx_bd *rxbd =
                &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return -ENOMEM;
        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(bp->pdev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        rx_pg->page = page;
        pci_unmap_addr_set(rx_pg, mapping, mapping);
        /* Split the 64-bit DMA address across the descriptor words. */
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        return 0;
}
2490
2491 static void
2492 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2493 {
2494         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2495         struct page *page = rx_pg->page;
2496
2497         if (!page)
2498                 return;
2499
2500         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2501                        PCI_DMA_FROMDEVICE);
2502
2503         __free_page(page);
2504         rx_pg->page = NULL;
2505 }
2506
/* Allocate a new skb for slot @index of the rx ring, align its data
 * pointer, DMA-map it, and program the rx_bd with the mapping.  On
 * success the ring's producer byte sequence (rx_prod_bseq) is advanced
 * by the buffer size.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated, or -EIO
 * if the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data on a BNX2_RX_ALIGN boundary if it isn't already. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the two 32-bit BD fields. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2541
2542 static int
2543 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2544 {
2545         struct status_block *sblk = bnapi->status_blk.msi;
2546         u32 new_link_state, old_link_state;
2547         int is_set = 1;
2548
2549         new_link_state = sblk->status_attn_bits & event;
2550         old_link_state = sblk->status_attn_bits_ack & event;
2551         if (new_link_state != old_link_state) {
2552                 if (new_link_state)
2553                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2554                 else
2555                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2556         } else
2557                 is_set = 0;
2558
2559         return is_set;
2560 }
2561
/* Service PHY attention events: re-evaluate the link on a link-state
 * change and the remote link on a timer-abort event.  PHY access is
 * serialized with bp->phy_lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2575
2576 static inline u16
2577 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2578 {
2579         u16 cons;
2580
2581         /* Tell compiler that status block fields can change. */
2582         barrier();
2583         cons = *bnapi->hw_tx_cons_ptr;
2584         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2585                 cons++;
2586         return cons;
2587 }
2588
/* Reclaim completed tx descriptors for the ring owned by @bnapi,
 * freeing up to @budget skbs.  Returns the number of packets
 * reclaimed.  If the corresponding tx queue was stopped and enough
 * descriptors have become free, the queue is woken.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance maps 1:1 to a tx queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the skipped last slot of a ring page. */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD has not completed. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Skip the fragment BDs ... */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		/* ... and the packet's first BD. */
		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2669
/* Recycle @count page-ring entries: move each page (and its DMA
 * mapping) from the consumer side back to the producer side so the
 * hardware can reuse it.  If @skb is non-NULL, its last page fragment
 * is first detached and placed back into the consumer slot, and the
 * skb is freed (the caller could not allocate a replacement page).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Only move state when the slots differ; same slot means
		 * the entry is already where it needs to be.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2725
/* Recycle the rx buffer at @cons into producer slot @prod: the skb and
 * its DMA mapping are handed back so the hardware can refill the same
 * buffer.  The header area (BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH
 * bytes) is synced back to the device first.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD contents are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2755
/* Finish receiving a packet into @skb: refill its rx ring slot, unmap
 * the buffer, and, for split/jumbo packets (@hdr_len != 0), attach the
 * payload pages from the page ring as skb fragments.  @ring_idx packs
 * the consumer index in the high 16 bits and the producer index in the
 * low 16 bits.  @len is the packet length without the 4-byte trailer.
 * On allocation failure the buffer (and any pages) are recycled and an
 * error code is returned; 0 on success.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* Could not refill: recycle the skb (and its pages). */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: all data is already in the skb head. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size counts the payload past the header plus the
		 * 4-byte trailer, which is trimmed from the last page.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only trailer bytes remain; drop them and
				 * recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Refill failed: return the pages (including
				 * the frag just attached) and free the skb.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2854
2855 static inline u16
2856 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2857 {
2858         u16 cons;
2859
2860         /* Tell compiler that status block fields can change. */
2861         barrier();
2862         cons = *bnapi->hw_rx_cons_ptr;
2863         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2864                 cons++;
2865         return cons;
2866 }
2867
/* NAPI rx handler for one ring.  Processes up to @budget received
 * packets: validates the l2_fhdr status, copies small packets into a
 * freshly allocated skb, attaches page-ring fragments for split/jumbo
 * packets, handles VLAN tags and checksum offload results, and passes
 * each packet up the stack.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area; the full buffer is unmapped
		 * later if the packet is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr to the packet data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Bad frame: recycle the buffer and drop. */
			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop the 4-byte trailer included in l2_fhdr_pkt_len. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small packet: copy into a new skb and recycle
			 * the original rx buffer.
			 */
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group registered: re-insert the
				 * stripped tag into the packet data.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag
		 * (0x8100), which accounts for the extra 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3034
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.  Returns IRQ_HANDLED
 * unconditionally (MSI lines are not shared).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts; bnx2_poll() re-enables them when done. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
3058
/* One-shot MSI ISR.  Unlike bnx2_msi(), no INT_ACK_CMD write is done
 * before scheduling NAPI (presumably the one-shot mode masks the
 * interrupt in hardware — see bnx2_msi() for the non-one-shot case).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
3076
/* INTx ISR (also used on shared interrupt lines).  Returns IRQ_NONE
 * when the interrupt was not raised by this device, IRQ_HANDLED
 * otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts; NAPI processing re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
3116
3117 static inline int
3118 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3119 {
3120         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3121         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3122
3123         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3124             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3125                 return 1;
3126         return 0;
3127 }
3128
3129 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3130                                  STATUS_ATTN_BITS_TIMER_ABORT)
3131
3132 static inline int
3133 bnx2_has_work(struct bnx2_napi *bnapi)
3134 {
3135         struct status_block *sblk = bnapi->status_blk.msi;
3136
3137         if (bnx2_has_fast_work(bnapi))
3138                 return 1;
3139
3140         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3141             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3142                 return 1;
3143
3144         return 0;
3145 }
3146
/* Check for unacknowledged link/timer attention events and service
 * them via bnx2_phy_int().  Also forces a status block coalesce
 * update to catch transient state during link changes.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back to flush the write before continuing. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3166
/* Run one round of fast-path NAPI work: reap tx completions (not
 * counted against the budget), then receive packets up to the
 * remaining budget.  Returns the updated @work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3181
/* NAPI poll routine for MSI-X vectors: fast-path work only (no link
 * handling).  Loops until the budget is exhausted or no work remains,
 * then completes NAPI and re-enables the vector's interrupt.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			netif_rx_complete(bp->dev, napi);
			/* Ack the status index; no MASK_INT bit, so this
			 * re-enables the interrupt for this vector.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3208
/* Main NAPI poll routine (INTx/MSI): services link events and
 * fast-path work until the budget is exhausted or no work remains,
 * then completes NAPI and re-enables interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: single write re-enables. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first write updates the index with the
			 * interrupt still masked, then write again without
			 * MASK_INT to re-enable interrupts.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3252
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct dev_addr_list *uc_ptr;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we may set below
	 * cleared, so stale PROMISCUOUS/KEEP_VLAN settings do not stick.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep the VLAN tag in received frames only when no vlan group
	 * is registered and the chip supports it.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: open every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 256 filter bits
			 * (NUM_MC_HASH_REGISTERS registers x 32 bits).
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	uc_ptr = NULL;
	/* More secondary unicast addresses than hardware match filters:
	 * fall back to promiscuous mode.
	 */
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		uc_ptr = dev->uc_list;

		/* Add all entries into to the match filter list */
		for (i = 0; i < dev->uc_count; i++) {
			bnx2_set_mac_addr(bp, uc_ptr->da_addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			uc_ptr = uc_ptr->next;
		}

	}

	/* Skip the register write when the mode is unchanged. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Sort user0 filter: disable, program, then re-enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3350
3351 static void
3352 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3353         u32 rv2p_proc)
3354 {
3355         int i;
3356         u32 val;
3357
3358         if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3359                 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3360                 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3361                 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3362                 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3363         }
3364
3365         for (i = 0; i < rv2p_code_len; i += 8) {
3366                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3367                 rv2p_code++;
3368                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3369                 rv2p_code++;
3370
3371                 if (rv2p_proc == RV2P_PROC1) {
3372                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3373                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3374                 }
3375                 else {
3376                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3377                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3378                 }
3379         }
3380
3381         /* Reset the processor, un-stall is done later. */
3382         if (rv2p_proc == RV2P_PROC1) {
3383                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3384         }
3385         else {
3386                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3387         }
3388 }
3389
3390 static int
3391 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3392 {
3393         u32 offset;
3394         u32 val;
3395         int rc;
3396
3397         /* Halt the CPU. */
3398         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3399         val |= cpu_reg->mode_value_halt;
3400         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3401         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3402
3403         /* Load the Text area. */
3404         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3405         if (fw->gz_text) {
3406                 int j;
3407
3408                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3409                                        fw->gz_text_len);
3410                 if (rc < 0)
3411                         return rc;
3412
3413                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3414                         bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3415                 }
3416         }
3417
3418         /* Load the Data area. */
3419         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3420         if (fw->data) {
3421                 int j;
3422
3423                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3424                         bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3425                 }
3426         }
3427
3428         /* Load the SBSS area. */
3429         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3430         if (fw->sbss_len) {
3431                 int j;
3432
3433                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3434                         bnx2_reg_wr_ind(bp, offset, 0);
3435                 }
3436         }
3437
3438         /* Load the BSS area. */
3439         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3440         if (fw->bss_len) {
3441                 int j;
3442
3443                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3444                         bnx2_reg_wr_ind(bp, offset, 0);
3445                 }
3446         }
3447
3448         /* Load the Read-Only area. */
3449         offset = cpu_reg->spad_base +
3450                 (fw->rodata_addr - cpu_reg->mips_view_base);
3451         if (fw->rodata) {
3452                 int j;
3453
3454                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3455                         bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3456                 }
3457         }
3458
3459         /* Clear the pre-fetch instruction. */
3460         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3461         bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3462
3463         /* Start the CPU. */
3464         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3465         val &= ~cpu_reg->mode_value_halt;
3466         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3467         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3468
3469         return 0;
3470 }
3471
3472 static int
3473 bnx2_init_cpus(struct bnx2 *bp)
3474 {
3475         struct fw_info *fw;
3476         int rc, rv2p_len;
3477         void *text, *rv2p;
3478
3479         /* Initialize the RV2P processor. */
3480         text = vmalloc(FW_BUF_SIZE);
3481         if (!text)
3482                 return -ENOMEM;
3483         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3484                 rv2p = bnx2_xi_rv2p_proc1;
3485                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3486         } else {
3487                 rv2p = bnx2_rv2p_proc1;
3488                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3489         }
3490         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3491         if (rc < 0)
3492                 goto init_cpu_err;
3493
3494         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3495
3496         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3497                 rv2p = bnx2_xi_rv2p_proc2;
3498                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3499         } else {
3500                 rv2p = bnx2_rv2p_proc2;
3501                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3502         }
3503         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3504         if (rc < 0)
3505                 goto init_cpu_err;
3506
3507         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3508
3509         /* Initialize the RX Processor. */
3510         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3511                 fw = &bnx2_rxp_fw_09;
3512         else
3513                 fw = &bnx2_rxp_fw_06;
3514
3515         fw->text = text;
3516         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3517         if (rc)
3518                 goto init_cpu_err;
3519
3520         /* Initialize the TX Processor. */
3521         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3522                 fw = &bnx2_txp_fw_09;
3523         else
3524                 fw = &bnx2_txp_fw_06;
3525
3526         fw->text = text;
3527         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3528         if (rc)
3529                 goto init_cpu_err;
3530
3531         /* Initialize the TX Patch-up Processor. */
3532         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3533                 fw = &bnx2_tpat_fw_09;
3534         else
3535                 fw = &bnx2_tpat_fw_06;
3536
3537         fw->text = text;
3538         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3539         if (rc)
3540                 goto init_cpu_err;
3541
3542         /* Initialize the Completion Processor. */
3543         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3544                 fw = &bnx2_com_fw_09;
3545         else
3546                 fw = &bnx2_com_fw_06;
3547
3548         fw->text = text;
3549         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3550         if (rc)
3551                 goto init_cpu_err;
3552
3553         /* Initialize the Command Processor. */
3554         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3555                 fw = &bnx2_cp_fw_09;
3556         else
3557                 fw = &bnx2_cp_fw_06;
3558
3559         fw->text = text;
3560         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3561
3562 init_cpu_err:
3563         vfree(text);
3564         return rc;
3565 }
3566
/* Move the device between PCI power states D0 and D3hot, arming
 * wake-on-LAN when entering D3hot with WOL enabled.  Returns 0 on
 * success or -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (back to D0) and write the
		 * PME status bit (write-1-to-clear) in a single access.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any magic/ACPI packet indications and turn off
		 * magic-packet mode now that we are back at full power.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* On TP (copper) ports, temporarily restrict
			 * autoneg to 10/100 for the low-power link, then
			 * restore the user's settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort user0 filter: disable, program, enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether WOL is in effect. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): 3 is the D3hot encoding of the PMCSR
		 * PowerState field.  On 5706 A0/A1 it is set only when
		 * WOL is enabled — presumably a chip-rev workaround;
		 * confirm against Broadcom errata.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3704
3705 static int
3706 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3707 {
3708         u32 val;
3709         int j;
3710
3711         /* Request access to the flash interface. */
3712         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3713         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3714                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3715                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3716                         break;
3717
3718                 udelay(5);
3719         }
3720
3721         if (j >= NVRAM_TIMEOUT_COUNT)
3722                 return -EBUSY;
3723
3724         return 0;
3725 }
3726
3727 static int
3728 bnx2_release_nvram_lock(struct bnx2 *bp)
3729 {
3730         int j;
3731         u32 val;
3732
3733         /* Relinquish nvram interface. */
3734         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3735
3736         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3737                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3738                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3739                         break;
3740
3741                 udelay(5);
3742         }
3743
3744         if (j >= NVRAM_TIMEOUT_COUNT)
3745                 return -EBUSY;
3746
3747         return 0;
3748 }
3749
3750
3751 static int
3752 bnx2_enable_nvram_write(struct bnx2 *bp)
3753 {
3754         u32 val;
3755
3756         val = REG_RD(bp, BNX2_MISC_CFG);
3757         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3758
3759         if (bp->flash_info->flags & BNX2_NV_WREN) {
3760                 int j;
3761
3762                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3763                 REG_WR(bp, BNX2_NVM_COMMAND,
3764                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3765
3766                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3767                         udelay(5);
3768
3769                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3770                         if (val & BNX2_NVM_COMMAND_DONE)
3771                                 break;
3772                 }
3773
3774                 if (j >= NVRAM_TIMEOUT_COUNT)
3775                         return -EBUSY;
3776         }
3777         return 0;
3778 }
3779
3780 static void
3781 bnx2_disable_nvram_write(struct bnx2 *bp)
3782 {
3783         u32 val;
3784
3785         val = REG_RD(bp, BNX2_MISC_CFG);
3786         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3787 }
3788
3789
3790 static void
3791 bnx2_enable_nvram_access(struct bnx2 *bp)
3792 {
3793         u32 val;
3794
3795         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3796         /* Enable both bits, even on read. */
3797         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3798                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3799 }
3800
3801 static void
3802 bnx2_disable_nvram_access(struct bnx2 *bp)
3803 {
3804         u32 val;
3805
3806         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3807         /* Disable both bits, even after read. */
3808         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3809                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3810                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3811 }
3812
3813 static int
3814 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3815 {
3816         u32 cmd;
3817         int j;
3818
3819         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3820                 /* Buffered flash, no erase needed */
3821                 return 0;
3822
3823         /* Build an erase command */
3824         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3825               BNX2_NVM_COMMAND_DOIT;
3826
3827         /* Need to clear DONE bit separately. */
3828         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3829
3830         /* Address of the NVRAM to read from. */
3831         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3832
3833         /* Issue an erase command. */
3834         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3835
3836         /* Wait for completion. */
3837         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3838                 u32 val;
3839
3840                 udelay(5);
3841
3842                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3843                 if (val & BNX2_NVM_COMMAND_DONE)
3844                         break;
3845         }
3846
3847         if (j >= NVRAM_TIMEOUT_COUNT)
3848                 return -EBUSY;
3849
3850         return 0;
3851 }
3852
3853 static int
3854 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3855 {
3856         u32 cmd;
3857         int j;
3858
3859         /* Build the command word. */
3860         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3861
3862         /* Calculate an offset of a buffered flash, not needed for 5709. */
3863         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3864                 offset = ((offset / bp->flash_info->page_size) <<
3865                            bp->flash_info->page_bits) +
3866                           (offset % bp->flash_info->page_size);
3867         }
3868
3869         /* Need to clear DONE bit separately. */
3870         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3871
3872         /* Address of the NVRAM to read from. */
3873         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3874
3875         /* Issue a read command. */
3876         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3877
3878         /* Wait for completion. */
3879         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3880                 u32 val;
3881
3882                 udelay(5);
3883
3884                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3885                 if (val & BNX2_NVM_COMMAND_DONE) {
3886                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3887                         memcpy(ret_val, &v, 4);
3888                         break;
3889                 }
3890         }
3891         if (j >= NVRAM_TIMEOUT_COUNT)
3892                 return -EBUSY;
3893
3894         return 0;
3895 }
3896
3897
3898 static int
3899 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3900 {
3901         u32 cmd;
3902         __be32 val32;
3903         int j;
3904
3905         /* Build the command word. */
3906         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3907
3908         /* Calculate an offset of a buffered flash, not needed for 5709. */
3909         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3910                 offset = ((offset / bp->flash_info->page_size) <<
3911                           bp->flash_info->page_bits) +
3912                          (offset % bp->flash_info->page_size);
3913         }
3914
3915         /* Need to clear DONE bit separately. */
3916         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3917
3918         memcpy(&val32, val, 4);
3919
3920         /* Write the data. */
3921         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3922
3923         /* Address of the NVRAM to write to. */
3924         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3925
3926         /* Issue the write command. */
3927         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3928
3929         /* Wait for completion. */
3930         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3931                 udelay(5);
3932
3933                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3934                         break;
3935         }
3936         if (j >= NVRAM_TIMEOUT_COUNT)
3937                 return -EBUSY;
3938
3939         return 0;
3940 }
3941
/* Identify the attached flash/EEPROM device and record its parameters
 * in bp->flash_info and bp->flash_size.  Returns 0 on success, -ENODEV
 * if the part is not in flash_table, or a lock-acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a single known flash interface; no table probing. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strap field of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop above exits with j == entry_count when no table
	 * entry matched (break leaves j < entry_count).
	 */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVM size the bootcode published in shared memory;
	 * fall back to the table's total_size when it is absent.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4024
4025 static int
4026 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4027                 int buf_size)
4028 {
4029         int rc = 0;
4030         u32 cmd_flags, offset32, len32, extra;
4031
4032         if (buf_size == 0)
4033                 return 0;
4034
4035         /* Request access to the flash interface. */
4036         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4037                 return rc;
4038
4039         /* Enable access to flash interface */
4040         bnx2_enable_nvram_access(bp);
4041
4042         len32 = buf_size;
4043         offset32 = offset;
4044         extra = 0;
4045
4046         cmd_flags = 0;
4047
4048         if (offset32 & 3) {
4049                 u8 buf[4];
4050                 u32 pre_len;
4051
4052                 offset32 &= ~3;
4053                 pre_len = 4 - (offset & 3);
4054
4055                 if (pre_len >= len32) {
4056                         pre_len = len32;
4057                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4058                                     BNX2_NVM_COMMAND_LAST;
4059                 }
4060                 else {
4061                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4062                 }
4063
4064                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4065
4066                 if (rc)
4067                         return rc;
4068
4069                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4070
4071                 offset32 += 4;
4072                 ret_buf += pre_len;
4073                 len32 -= pre_len;
4074         }
4075         if (len32 & 3) {
4076                 extra = 4 - (len32 & 3);
4077                 len32 = (len32 + 4) & ~3;
4078         }
4079
4080         if (len32 == 4) {
4081                 u8 buf[4];
4082
4083                 if (cmd_flags)
4084                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4085                 else
4086                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4087                                     BNX2_NVM_COMMAND_LAST;
4088
4089                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4090
4091                 memcpy(ret_buf, buf, 4 - extra);
4092         }
4093         else if (len32 > 0) {
4094                 u8 buf[4];
4095
4096                 /* Read the first word. */
4097                 if (cmd_flags)
4098                         cmd_flags = 0;
4099                 else
4100                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4101
4102                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4103
4104                 /* Advance to the next dword. */
4105                 offset32 += 4;
4106                 ret_buf += 4;
4107                 len32 -= 4;
4108
4109                 while (len32 > 4 && rc == 0) {
4110                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4111
4112                         /* Advance to the next dword. */
4113                         offset32 += 4;
4114                         ret_buf += 4;
4115                         len32 -= 4;
4116                 }
4117
4118                 if (rc)
4119                         return rc;
4120
4121                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4122                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4123
4124                 memcpy(ret_buf, buf, 4 - extra);
4125         }
4126
4127         /* Disable access to flash interface */
4128         bnx2_disable_nvram_access(bp);
4129
4130         bnx2_release_nvram_lock(bp);
4131
4132         return rc;
4133 }
4134
/* Write @buf_size bytes from @data_buf into NVRAM at byte @offset.
 *
 * The NVRAM interface operates on 4-byte words, so unaligned head and
 * tail bytes are handled with a read-modify-write through a temporary
 * aligned buffer.  For non-buffered flash parts, each page touched must
 * be read out in full, erased, and rewritten, because the part cannot
 * modify individual words in place.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or an error
 * propagated from the NVRAM access helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: round the offset down to a dword boundary and
	 * read the dword that holds the leading bytes so they can be
	 * preserved across the write. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: likewise preserve the trailing bytes of the
	 * final dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved head/tail bytes with the caller's data into a
	 * single dword-aligned scratch buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized bounce buffer for the
	 * read-erase-rewrite cycle (264 bytes covers the largest
	 * supported page size). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process the write one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* Mark the final dword of the command sequence: end
			 * of page, or (buffered flash) end of the data. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4314
4315 static void
4316 bnx2_init_fw_cap(struct bnx2 *bp)
4317 {
4318         u32 val, sig = 0;
4319
4320         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4321         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4322
4323         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4324                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4325
4326         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4327         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4328                 return;
4329
4330         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4331                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4332                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4333         }
4334
4335         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4336             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4337                 u32 link;
4338
4339                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4340
4341                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4342                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4343                         bp->phy_port = PORT_FIBRE;
4344                 else
4345                         bp->phy_port = PORT_TP;
4346
4347                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4348                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4349         }
4350
4351         if (netif_running(bp->dev) && sig)
4352                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4353 }
4354
/* Point two GRC windows at the chip's MSI-X vector table and the
 * pending-bit array so they are reachable through the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch to separately programmable windows first. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4363
/* Perform a coordinated soft reset of the chip.
 *
 * Quiesces DMA, handshakes the reset with the bootcode via @reset_code,
 * issues the chip-specific reset (5709 uses MISC_COMMAND; older chips
 * use PCICFG_MISC_CONFIG), verifies endian configuration, waits for
 * firmware re-initialization, and re-applies state that the reset
 * cleared (firmware capabilities, A0 workarounds, MSI-X windows).
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV on
 * bad endian configuration, or an error from the firmware handshake.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via MISC_COMMAND, then restore the config
		 * access mode bits that the reset clears. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-probe firmware capabilities; if remote PHY support changed
	 * the port type, reprogram the remote link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* Reset clears the GRC windows; re-map the MSI-X table. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4469
/* Bring the chip from post-reset state to fully operational.
 *
 * Programs DMA configuration, context memory, on-chip CPUs' firmware,
 * MAC address, MTU, host coalescing parameters and per-vector status
 * blocks, clears statistics, sets the rx filter, and finally tells the
 * bootcode initialization is done before enabling all chip blocks.
 *
 * Caller is expected to have reset the chip first (bnx2_reset_chip).
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the enable-relaxed-ordering bit. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: interrupt value in the high half,
	 * non-interrupt value in the low half of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	/* With multiple vectors, space status blocks 128 bytes apart. */
	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status-block configuration (vectors 1..n-1). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the bootcode initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	/* Enable the remaining chip blocks; read back to flush posting. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4680
4681 static void
4682 bnx2_clear_ring_states(struct bnx2 *bp)
4683 {
4684         struct bnx2_napi *bnapi;
4685         struct bnx2_tx_ring_info *txr;
4686         struct bnx2_rx_ring_info *rxr;
4687         int i;
4688
4689         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4690                 bnapi = &bp->bnx2_napi[i];
4691                 txr = &bnapi->tx_ring;
4692                 rxr = &bnapi->rx_ring;
4693
4694                 txr->tx_cons = 0;
4695                 txr->hw_tx_cons = 0;
4696                 rxr->rx_prod_bseq = 0;
4697                 rxr->rx_prod = 0;
4698                 rxr->rx_cons = 0;
4699                 rxr->rx_pg_prod = 0;
4700                 rxr->rx_pg_cons = 0;
4701         }
4702 }
4703
/* Program the chip context for one tx ring: ring type/size, command
 * type, and the 64-bit host DMA address of the tx descriptor ring.
 * The 5709 uses a different (XI) context layout than earlier chips, so
 * the context offsets are selected per chip family first.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* NOTE(review): (8 << 16) sets a field above the command type —
	 * presumably a BD count/prefetch parameter; confirm against the
	 * chip documentation. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* High then low halves of the descriptor ring DMA address. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
4733
4734 static void
4735 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4736 {
4737         struct tx_bd *txbd;
4738         u32 cid = TX_CID;
4739         struct bnx2_napi *bnapi;
4740         struct bnx2_tx_ring_info *txr;
4741
4742         bnapi = &bp->bnx2_napi[ring_num];
4743         txr = &bnapi->tx_ring;
4744
4745         if (ring_num == 0)
4746                 cid = TX_CID;
4747         else
4748                 cid = TX_TSS_CID + ring_num - 1;
4749
4750         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4751
4752         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4753
4754         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4755         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4756
4757         txr->tx_prod = 0;
4758         txr->tx_prod_bseq = 0;
4759
4760         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4761         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4762
4763         bnx2_init_tx_context(bp, cid, txr);
4764 }
4765
4766 static void
4767 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4768                      int num_rings)
4769 {
4770         int i;
4771         struct rx_bd *rxbd;
4772
4773         for (i = 0; i < num_rings; i++) {
4774                 int j;
4775
4776                 rxbd = &rx_ring[i][0];
4777                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4778                         rxbd->rx_bd_len = buf_size;
4779                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4780                 }
4781                 if (i == (num_rings - 1))
4782                         j = 0;
4783                 else
4784                         j = i + 1;
4785                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4786                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4787         }
4788 }
4789
/* Initialize rx ring @ring_num: build the descriptor chains, program
 * the ring's chip context (including the optional jumbo page ring),
 * pre-fill the rings with pages/skbs, and publish the initial producer
 * indices to the chip mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx CID; extra rings use the RSS CIDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default to no page ring; overridden below if jumbo is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Set up the page (jumbo) descriptor chain and tell the
		 * chip the buffer/page sizes and the page ring address. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* DMA address of the first rx descriptor page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early (best effort) on alloc
	 * failure and keep whatever was allocated. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for the rx doorbells. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4869
/* (Re)initialize every tx and rx ring and, when multiple rings are in
 * use, program the TSS/RSS steering configuration and the RSS
 * indirection table in RXP scratch memory.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are being set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the rx rings are being set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table round-robin over the non-
		 * default rx rings, packing four one-byte entries into a
		 * big-endian dword before each indirect write. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
4914
4915 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4916 {
4917         u32 max, num_rings = 1;
4918
4919         while (ring_size > MAX_RX_DESC_CNT) {
4920                 ring_size -= MAX_RX_DESC_CNT;
4921                 num_rings++;
4922         }
4923         /* round to next power of 2 */
4924         max = max_size;
4925         while ((max & num_rings) == 0)
4926                 max >>= 1;
4927
4928         if (num_rings != max)
4929                 max <<= 1;
4930
4931         return max;
4932 }
4933
4934 static void
4935 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4936 {
4937         u32 rx_size, rx_space, jumbo_size;
4938
4939         /* 8 for CRC and VLAN */
4940         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4941
4942         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4943                 sizeof(struct skb_shared_info);
4944
4945         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4946         bp->rx_pg_ring_size = 0;
4947         bp->rx_max_pg_ring = 0;
4948         bp->rx_max_pg_ring_idx = 0;
4949         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4950                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4951
4952                 jumbo_size = size * pages;
4953                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4954                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4955
4956                 bp->rx_pg_ring_size = jumbo_size;
4957                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4958                                                         MAX_RX_PG_RINGS);
4959                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4960                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4961                 bp->rx_copy_thresh = 0;
4962         }
4963
4964         bp->rx_buf_use_size = rx_size;
4965         /* hw alignment */
4966         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4967         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4968         bp->rx_ring_size = size;
4969         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4970         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4971 }
4972
4973 static void
4974 bnx2_free_tx_skbs(struct bnx2 *bp)
4975 {
4976         int i;
4977
4978         for (i = 0; i < bp->num_tx_rings; i++) {
4979                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4980                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4981                 int j;
4982
4983                 if (txr->tx_buf_ring == NULL)
4984                         continue;
4985
4986                 for (j = 0; j < TX_DESC_CNT; ) {
4987                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
4988                         struct sk_buff *skb = tx_buf->skb;
4989
4990                         if (skb == NULL) {
4991                                 j++;
4992                                 continue;
4993                         }
4994
4995                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
4996
4997                         tx_buf->skb = NULL;
4998
4999                         j += skb_shinfo(skb)->nr_frags + 1;
5000                         dev_kfree_skb(skb);
5001                 }
5002         }
5003 }
5004
5005 static void
5006 bnx2_free_rx_skbs(struct bnx2 *bp)
5007 {
5008         int i;
5009
5010         for (i = 0; i < bp->num_rx_rings; i++) {
5011                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5012                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5013                 int j;
5014
5015                 if (rxr->rx_buf_ring == NULL)
5016                         return;
5017
5018                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5019                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5020                         struct sk_buff *skb = rx_buf->skb;
5021
5022                         if (skb == NULL)
5023                                 continue;
5024
5025                         pci_unmap_single(bp->pdev,
5026                                          pci_unmap_addr(rx_buf, mapping),
5027                                          bp->rx_buf_use_size,
5028                                          PCI_DMA_FROMDEVICE);
5029
5030                         rx_buf->skb = NULL;
5031
5032                         dev_kfree_skb(skb);
5033                 }
5034                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5035                         bnx2_free_rx_page(bp, rxr, j);
5036         }
5037 }
5038
/* Free all socket buffers still held by the tx and rx rings.
 * Called with the hardware quiesced (see bnx2_reset_nic()).
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5045
5046 static int
5047 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5048 {
5049         int rc;
5050
5051         rc = bnx2_reset_chip(bp, reset_code);
5052         bnx2_free_skbs(bp);
5053         if (rc)
5054                 return rc;
5055
5056         if ((rc = bnx2_init_chip(bp)) != 0)
5057                 return rc;
5058
5059         bnx2_init_all_rings(bp);
5060         return 0;
5061 }
5062
5063 static int
5064 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5065 {
5066         int rc;
5067
5068         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5069                 return rc;
5070
5071         spin_lock_bh(&bp->phy_lock);
5072         bnx2_init_phy(bp, reset_phy);
5073         bnx2_set_link(bp);
5074         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5075                 bnx2_remote_phy_event(bp);
5076         spin_unlock_bh(&bp->phy_lock);
5077         return 0;
5078 }
5079
5080 static int
5081 bnx2_shutdown_chip(struct bnx2 *bp)
5082 {
5083         u32 reset_code;
5084
5085         if (bp->flags & BNX2_FLAG_NO_WOL)
5086                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5087         else if (bp->wol)
5088                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5089         else
5090                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5091
5092         return bnx2_reset_chip(bp, reset_code);
5093 }
5094
/* Self-test: verify that selected chip registers behave as expected.
 *
 * For each entry in reg_tbl, write all 0s and then all 1s to the
 * register and check that:
 *   - writable bits (rw_mask) take the written value, and
 *   - read-only bits (ro_mask) keep their original value.
 * The original register contents are restored afterwards.
 *
 * Returns 0 on success or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, rw_mask, ro_mask } — entries marked
	 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  The table is
	 * terminated by offset == 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all 0s: writable bits must read back 0 and
		 * read-only bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all 1s: writable bits must read back 1 and
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5265
5266 static int
5267 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5268 {
5269         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5270                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5271         int i;
5272
5273         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5274                 u32 offset;
5275
5276                 for (offset = 0; offset < size; offset += 4) {
5277
5278                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5279
5280                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5281                                 test_pattern[i]) {
5282                                 return -ENODEV;
5283                         }
5284                 }
5285         }
5286         return 0;
5287 }
5288
5289 static int
5290 bnx2_test_memory(struct bnx2 *bp)
5291 {
5292         int ret = 0;
5293         int i;
5294         static struct mem_entry {
5295                 u32   offset;
5296                 u32   len;
5297         } mem_tbl_5706[] = {
5298                 { 0x60000,  0x4000 },
5299                 { 0xa0000,  0x3000 },
5300                 { 0xe0000,  0x4000 },
5301                 { 0x120000, 0x4000 },
5302                 { 0x1a0000, 0x4000 },
5303                 { 0x160000, 0x4000 },
5304                 { 0xffffffff, 0    },
5305         },
5306         mem_tbl_5709[] = {
5307                 { 0x60000,  0x4000 },
5308                 { 0xa0000,  0x3000 },
5309                 { 0xe0000,  0x4000 },
5310                 { 0x120000, 0x4000 },
5311                 { 0x1a0000, 0x4000 },
5312                 { 0xffffffff, 0    },
5313         };
5314         struct mem_entry *mem_tbl;
5315
5316         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5317                 mem_tbl = mem_tbl_5709;
5318         else
5319                 mem_tbl = mem_tbl_5706;
5320
5321         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5322                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5323                         mem_tbl[i].len)) != 0) {
5324                         return ret;
5325                 }
5326         }
5327
5328         return ret;
5329 }
5330
5331 #define BNX2_MAC_LOOPBACK       0
5332 #define BNX2_PHY_LOOPBACK       1
5333
/* Send one frame through the selected internal loopback path (MAC or
 * PHY) and verify it is received back intact.
 *
 * Runs in polled mode: status-block updates are forced with
 * BNX2_HC_COMMAND_COAL_NOW_WO_INT and the hardware consumer indices
 * are read directly, so no interrupts are needed.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on
 * allocation/mapping failure, or -ENODEV if the frame is not
 * received correctly.  PHY loopback on a remote-PHY setup is skipped
 * and reported as success.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Loopback uses the first (default) tx and rx rings. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* Cannot loop back through a firmware-managed remote PHY. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type bytes, then an incrementing byte pattern.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_maps[0];

	/* Force a status-block update to capture a baseline rx
	 * consumer index.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx descriptor covering the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status-block update so the tx/rx consumer
	 * indices reflect the looped-back frame.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The hardware places an l2_fhdr ahead of the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check excludes the 4-byte CRC appended by the MAC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte by byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5464
5465 #define BNX2_MAC_LOOPBACK_FAILED        1
5466 #define BNX2_PHY_LOOPBACK_FAILED        2
5467 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5468                                          BNX2_PHY_LOOPBACK_FAILED)
5469
5470 static int
5471 bnx2_test_loopback(struct bnx2 *bp)
5472 {
5473         int rc = 0;
5474
5475         if (!netif_running(bp->dev))
5476                 return BNX2_LOOPBACK_FAILED;
5477
5478         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5479         spin_lock_bh(&bp->phy_lock);
5480         bnx2_init_phy(bp, 1);
5481         spin_unlock_bh(&bp->phy_lock);
5482         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5483                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5484         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5485                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5486         return rc;
5487 }
5488
5489 #define NVRAM_SIZE 0x200
5490 #define CRC32_RESIDUAL 0xdebb20e3
5491
5492 static int
5493 bnx2_test_nvram(struct bnx2 *bp)
5494 {
5495         __be32 buf[NVRAM_SIZE / 4];
5496         u8 *data = (u8 *) buf;
5497         int rc = 0;
5498         u32 magic, csum;
5499
5500         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5501                 goto test_nvram_done;
5502
5503         magic = be32_to_cpu(buf[0]);
5504         if (magic != 0x669955aa) {
5505                 rc = -ENODEV;
5506                 goto test_nvram_done;
5507         }
5508
5509         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5510                 goto test_nvram_done;
5511
5512         csum = ether_crc_le(0x100, data);
5513         if (csum != CRC32_RESIDUAL) {
5514                 rc = -ENODEV;
5515                 goto test_nvram_done;
5516         }
5517
5518         csum = ether_crc_le(0x100, data + 0x100);
5519         if (csum != CRC32_RESIDUAL) {
5520                 rc = -ENODEV;
5521         }
5522
5523 test_nvram_done:
5524         return rc;
5525 }
5526
5527 static int
5528 bnx2_test_link(struct bnx2 *bp)
5529 {
5530         u32 bmsr;
5531
5532         if (!netif_running(bp->dev))
5533                 return -ENODEV;
5534
5535         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5536                 if (bp->link_up)
5537                         return 0;
5538                 return -ENODEV;
5539         }
5540         spin_lock_bh(&bp->phy_lock);
5541         bnx2_enable_bmsr1(bp);
5542         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5543         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5544         bnx2_disable_bmsr1(bp);
5545         spin_unlock_bh(&bp->phy_lock);
5546
5547         if (bmsr & BMSR_LSTATUS) {
5548                 return 0;
5549         }
5550         return -ENODEV;
5551 }
5552
5553 static int
5554 bnx2_test_intr(struct bnx2 *bp)
5555 {
5556         int i;
5557         u16 status_idx;
5558
5559         if (!netif_running(bp->dev))
5560                 return -ENODEV;
5561
5562         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5563
5564         /* This register is not touched during run-time. */
5565         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5566         REG_RD(bp, BNX2_HC_COMMAND);
5567
5568         for (i = 0; i < 10; i++) {
5569                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5570                         status_idx) {
5571
5572                         break;
5573                 }
5574
5575                 msleep_interruptible(10);
5576         }
5577         if (i < 10)
5578                 return 0;
5579
5580         return -ENODEV;
5581 }
5582
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled on this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No usable link without signal detect. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Double read — NOTE(review): presumably the AN debug bits
	 * are latched and the first read returns stale state.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5614
/* Per-tick link maintenance for the 5706 SerDes PHY, called from
 * bnx2_timer().  Handles parallel detection (forcing 1G full duplex
 * when the partner does not autonegotiate), recovery back to
 * autoneg, and loss-of-sync detection.  Takes bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* A recent autoneg restart is still settling. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Partner may not autoneg: if the SerDes sees
			 * a usable signal, force 1G full duplex
			 * (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detect.  NOTE(review):
		 * regs 0x17/0x15 are vendor shadow registers; bit
		 * 0x20 presumably indicates the partner now performs
		 * autoneg — confirm against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Re-enable autonegotiation. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read — NOTE(review): presumably latched
		 * status, matching the other double reads here.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link up but SerDes lost sync: force the
			 * link down once, then let bnx2_set_link()
			 * re-evaluate on subsequent ticks.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5676
/* Per-tick link maintenance for the 5708 SerDes PHY, called from
 * bnx2_timer().  While the link is down with autoneg enabled,
 * alternate between autonegotiation and forced 2.5G mode so a
 * non-negotiating partner can still be linked.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Firmware manages a remote PHY; nothing to do here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* A recent mode switch is still settling. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found no link; try forced 2.5G. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too; back to autoneg and
			 * give it two timer ticks to settle.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
5709
5710 static void
5711 bnx2_timer(unsigned long data)
5712 {
5713         struct bnx2 *bp = (struct bnx2 *) data;
5714
5715         if (!netif_running(bp->dev))
5716                 return;
5717
5718         if (atomic_read(&bp->intr_sem) != 0)
5719                 goto bnx2_restart_timer;
5720
5721         bnx2_send_heart_beat(bp);
5722
5723         bp->stats_blk->stat_FwRxDrop =
5724                 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5725
5726         /* workaround occasional corrupted counters */
5727         if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5728                 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5729                                             BNX2_HC_COMMAND_STATS_NOW);
5730
5731         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5732                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5733                         bnx2_5706_serdes_timer(bp);
5734                 else
5735                         bnx2_5708_serdes_timer(bp);
5736         }
5737
5738 bnx2_restart_timer:
5739         mod_timer(&bp->timer, jiffies + bp->current_interval);
5740 }
5741
5742 static int
5743 bnx2_request_irq(struct bnx2 *bp)
5744 {
5745         unsigned long flags;
5746         struct bnx2_irq *irq;
5747         int rc = 0, i;
5748
5749         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5750                 flags = 0;
5751         else
5752                 flags = IRQF_SHARED;
5753
5754         for (i = 0; i < bp->irq_nvecs; i++) {
5755                 irq = &bp->irq_tbl[i];
5756                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5757                                  &bp->bnx2_napi[i]);
5758                 if (rc)
5759                         break;
5760                 irq->requested = 1;
5761         }
5762         return rc;
5763 }
5764
5765 static void
5766 bnx2_free_irq(struct bnx2 *bp)
5767 {
5768         struct bnx2_irq *irq;
5769         int i;
5770
5771         for (i = 0; i < bp->irq_nvecs; i++) {
5772                 irq = &bp->irq_tbl[i];
5773                 if (irq->requested)
5774                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5775                 irq->requested = 0;
5776         }
5777         if (bp->flags & BNX2_FLAG_USING_MSI)
5778                 pci_disable_msi(bp->pdev);
5779         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5780                 pci_disable_msix(bp->pdev);
5781
5782         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5783 }
5784
5785 static void
5786 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5787 {
5788         int i, rc;
5789         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5790
5791         bnx2_setup_msix_tbl(bp);
5792         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5793         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5794         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5795
5796         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5797                 msix_ent[i].entry = i;
5798                 msix_ent[i].vector = 0;
5799
5800                 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5801                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5802         }
5803
5804         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5805         if (rc != 0)
5806                 return;
5807
5808         bp->irq_nvecs = msix_vecs;
5809         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5810         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5811                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5812 }
5813
5814 static void
5815 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5816 {
5817         int cpus = num_online_cpus();
5818         int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5819
5820         bp->irq_tbl[0].handler = bnx2_interrupt;
5821         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5822         bp->irq_nvecs = 1;
5823         bp->irq_tbl[0].vector = bp->pdev->irq;
5824
5825         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5826                 bnx2_enable_msix(bp, msix_vecs);
5827
5828         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5829             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5830                 if (pci_enable_msi(bp->pdev) == 0) {
5831                         bp->flags |= BNX2_FLAG_USING_MSI;
5832                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5833                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5834                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5835                         } else
5836                                 bp->irq_tbl[0].handler = bnx2_msi;
5837
5838                         bp->irq_tbl[0].vector = bp->pdev->irq;
5839                 }
5840         }
5841
5842         bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5843         bp->dev->real_num_tx_queues = bp->num_tx_rings;
5844
5845         bp->num_rx_rings = bp->irq_nvecs;
5846 }
5847
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Interrupt mode (INTx/MSI/MSI-X) must be decided before memory
	 * and IRQs are allocated, since it fixes the number of vectors
	 * and rings used below.
	 */
	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down the MSI setup and redo int mode with
			 * MSI forcibly disabled (dis_msi = 1), then
			 * reinitialize the chip for the new vector layout.
			 */
			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* Timer was already started above; stop it
				 * before unwinding.
				 */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind in reverse order of the setup above; the free routines
	 * tolerate partially-completed allocation.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
5924
5925 static void
5926 bnx2_reset_task(struct work_struct *work)
5927 {
5928         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5929
5930         if (!netif_running(bp->dev))
5931                 return;
5932
5933         bnx2_netif_stop(bp);
5934
5935         bnx2_init_nic(bp, 1);
5936
5937         atomic_set(&bp->intr_sem, 1);
5938         bnx2_netif_start(bp);
5939 }
5940
5941 static void
5942 bnx2_tx_timeout(struct net_device *dev)
5943 {
5944         struct bnx2 *bp = netdev_priv(dev);
5945
5946         /* This allows the netif to be shutdown gracefully before resetting */
5947         schedule_work(&bp->reset_task);
5948 }
5949
5950 #ifdef BCM_VLAN
5951 /* Called with rtnl_lock */
5952 static void
5953 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5954 {
5955         struct bnx2 *bp = netdev_priv(dev);
5956
5957         bnx2_netif_stop(bp);
5958
5959         bp->vlgrp = vlgrp;
5960         bnx2_set_rx_mode(dev);
5961         if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
5962                 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
5963
5964         bnx2_netif_start(bp);
5965 }
5966 #endif
5967
5968 /* Called with netif_tx_lock.
5969  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5970  * netif_wake_queue().
5971  */
5972 static int
5973 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5974 {
5975         struct bnx2 *bp = netdev_priv(dev);
5976         dma_addr_t mapping;
5977         struct tx_bd *txbd;
5978         struct sw_tx_bd *tx_buf;
5979         u32 len, vlan_tag_flags, last_frag, mss;
5980         u16 prod, ring_prod;
5981         int i;
5982         struct bnx2_napi *bnapi;
5983         struct bnx2_tx_ring_info *txr;
5984         struct netdev_queue *txq;
5985         struct skb_shared_info *sp;
5986
5987         /*  Determine which tx ring we will be placed on */
5988         i = skb_get_queue_mapping(skb);
5989         bnapi = &bp->bnx2_napi[i];
5990         txr = &bnapi->tx_ring;
5991         txq = netdev_get_tx_queue(dev, i);
5992
5993         if (unlikely(bnx2_tx_avail(bp, txr) <
5994             (skb_shinfo(skb)->nr_frags + 1))) {
5995                 netif_tx_stop_queue(txq);
5996                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5997                         dev->name);
5998
5999                 return NETDEV_TX_BUSY;
6000         }
6001         len = skb_headlen(skb);
6002         prod = txr->tx_prod;
6003         ring_prod = TX_RING_IDX(prod);
6004
6005         vlan_tag_flags = 0;
6006         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6007                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6008         }
6009
6010 #ifdef BCM_VLAN
6011         if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6012                 vlan_tag_flags |=
6013                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6014         }
6015 #endif
6016         if ((mss = skb_shinfo(skb)->gso_size)) {
6017                 u32 tcp_opt_len;
6018                 struct iphdr *iph;
6019
6020                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6021
6022                 tcp_opt_len = tcp_optlen(skb);
6023
6024                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6025                         u32 tcp_off = skb_transport_offset(skb) -
6026                                       sizeof(struct ipv6hdr) - ETH_HLEN;
6027
6028                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6029                                           TX_BD_FLAGS_SW_FLAGS;
6030                         if (likely(tcp_off == 0))
6031                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6032                         else {
6033                                 tcp_off >>= 3;
6034                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
6035                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
6036                                                   ((tcp_off & 0x10) <<
6037                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
6038                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6039                         }
6040                 } else {
6041                         iph = ip_hdr(skb);
6042                         if (tcp_opt_len || (iph->ihl > 5)) {
6043                                 vlan_tag_flags