/* bnx2: Update firmware to 5.0.0.j3.
 * Source: linux-2.6.git / drivers/net/bnx2.c
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define PFX DRV_MODULE_NAME     ": "
62 #define DRV_MODULE_VERSION      "2.0.2"
63 #define DRV_MODULE_RELDATE      "Aug 21, 2009"
64 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j3.fw"
65 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
66 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j3.fw"
67 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
68 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
69
70 #define RUN_AT(x) (jiffies + (x))
71
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74
/* Banner printed once at probe time; __devinitdata lets it be discarded
 * after device initialization when hotplug is not configured.
 */
static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Declare the firmware images loaded via request_firmware() so that
 * userspace tooling (e.g. initramfs builders) can find them.
 */
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: non-zero forces legacy INTx instead of MSI/MSI-X. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92
/* Board identifiers; used as the driver_data in bnx2_pci_tbl and as the
 * index into board_info[] below, so the order here must match that table.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
106
107 /* indexed by board_t, above */
/* Human-readable adapter names, indexed by board_t.  Designated
 * initializers make the coupling to the enum explicit.
 */
static struct {
        char *name;
} board_info[] __devinitdata = {
        [BCM5706]  = { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        [NC370T]   = { "HP NC370T Multifunction Gigabit Server Adapter" },
        [NC370I]   = { "HP NC370i Multifunction Gigabit Server Adapter" },
        [BCM5706S] = { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        [NC370F]   = { "HP NC370F Multifunction Gigabit Server Adapter" },
        [BCM5708]  = { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        [BCM5708S] = { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        [BCM5709]  = { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        [BCM5709S] = { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        [BCM5716]  = { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        [BCM5716S] = { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};
123
/* PCI IDs handled by this driver.  Entry order matters: the HP OEM
 * entries (matched by subsystem vendor/device) must precede the generic
 * PCI_ANY_ID entries for the same chip so they win the match.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 5716/5716S device IDs have no PCI_DEVICE_ID_NX2_* symbol yet. */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
149
/* NVRAM descriptors for the flash/EEPROM parts that may be strapped to
 * pre-5709 chips.  The first five hex words of each entry are opaque
 * controller configuration values (strap match, cfg1-cfg3, write1);
 * they are written to the NVM interface registers verbatim.  The
 * remaining fields give geometry and a printable name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Atmel Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
238
/* The 5709 family has a single fixed NVRAM configuration, so it is
 * described by one descriptor instead of the strap-matched flash_table.
 */
static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
247
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         smp_mb();
255
256         /* The ring uses 256 indices for 255 entries, one of them
257          * needs to be skipped.
258          */
259         diff = txr->tx_prod - txr->tx_cons;
260         if (unlikely(diff >= TX_DESC_CNT)) {
261                 diff &= 0xffff;
262                 if (diff == TX_DESC_CNT)
263                         diff = MAX_TX_DESC_CNT;
264         }
265         return (bp->tx_ring_size - diff);
266 }
267
268 static u32
269 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
270 {
271         u32 val;
272
273         spin_lock_bh(&bp->indirect_lock);
274         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
275         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
276         spin_unlock_bh(&bp->indirect_lock);
277         return val;
278 }
279
280 static void
281 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
282 {
283         spin_lock_bh(&bp->indirect_lock);
284         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
285         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
286         spin_unlock_bh(&bp->indirect_lock);
287 }
288
289 static void
290 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
291 {
292         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
293 }
294
295 static u32
296 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
297 {
298         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
299 }
300
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * On the 5709 the write goes through a request/completion interface:
 * post the data, issue a write request, then poll (up to ~25us) for the
 * WRITE_REQ bit to clear.  Older chips use a plain address/data pair.
 * The indirect_lock serializes access to these shared registers.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
                /* NOTE(review): a timeout here is silently ignored;
                 * presumably the hardware always completes in time.
                 */
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
324
325 #ifdef BCM_CNIC
326 static int
327 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
328 {
329         struct bnx2 *bp = netdev_priv(dev);
330         struct drv_ctl_io *io = &info->data.io;
331
332         switch (info->cmd) {
333         case DRV_CTL_IO_WR_CMD:
334                 bnx2_reg_wr_ind(bp, io->offset, io->data);
335                 break;
336         case DRV_CTL_IO_RD_CMD:
337                 io->data = bnx2_reg_rd_ind(bp, io->offset);
338                 break;
339         case DRV_CTL_CTX_WR_CMD:
340                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
341                 break;
342         default:
343                 return -EINVAL;
344         }
345         return 0;
346 }
347
348 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
349 {
350         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
351         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
352         int sb_id;
353
354         if (bp->flags & BNX2_FLAG_USING_MSIX) {
355                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
356                 bnapi->cnic_present = 0;
357                 sb_id = bp->irq_nvecs;
358                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
359         } else {
360                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
361                 bnapi->cnic_tag = bnapi->last_status_idx;
362                 bnapi->cnic_present = 1;
363                 sb_id = 0;
364                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
365         }
366
367         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
368         cp->irq_arr[0].status_blk = (void *)
369                 ((unsigned long) bnapi->status_blk.msi +
370                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
371         cp->irq_arr[0].status_blk_num = sb_id;
372         cp->num_irq = 1;
373 }
374
375 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
376                               void *data)
377 {
378         struct bnx2 *bp = netdev_priv(dev);
379         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
380
381         if (ops == NULL)
382                 return -EINVAL;
383
384         if (cp->drv_state & CNIC_DRV_STATE_REGD)
385                 return -EBUSY;
386
387         bp->cnic_data = data;
388         rcu_assign_pointer(bp->cnic_ops, ops);
389
390         cp->num_irq = 0;
391         cp->drv_state = CNIC_DRV_STATE_REGD;
392
393         bnx2_setup_cnic_irq_info(bp);
394
395         return 0;
396 }
397
398 static int bnx2_unregister_cnic(struct net_device *dev)
399 {
400         struct bnx2 *bp = netdev_priv(dev);
401         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
402         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
403
404         cp->drv_state = 0;
405         bnapi->cnic_present = 0;
406         rcu_assign_pointer(bp->cnic_ops, NULL);
407         synchronize_rcu();
408         return 0;
409 }
410
411 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
412 {
413         struct bnx2 *bp = netdev_priv(dev);
414         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
415
416         cp->drv_owner = THIS_MODULE;
417         cp->chip_id = bp->chip_id;
418         cp->pdev = bp->pdev;
419         cp->io_base = bp->regview;
420         cp->drv_ctl = bnx2_drv_ctl;
421         cp->drv_register_cnic = bnx2_register_cnic;
422         cp->drv_unregister_cnic = bnx2_unregister_cnic;
423
424         return cp;
425 }
426 EXPORT_SYMBOL(bnx2_cnic_probe);
427
428 static void
429 bnx2_cnic_stop(struct bnx2 *bp)
430 {
431         struct cnic_ops *c_ops;
432         struct cnic_ctl_info info;
433
434         rcu_read_lock();
435         c_ops = rcu_dereference(bp->cnic_ops);
436         if (c_ops) {
437                 info.cmd = CNIC_CTL_STOP_CMD;
438                 c_ops->cnic_ctl(bp->cnic_data, &info);
439         }
440         rcu_read_unlock();
441 }
442
443 static void
444 bnx2_cnic_start(struct bnx2 *bp)
445 {
446         struct cnic_ops *c_ops;
447         struct cnic_ctl_info info;
448
449         rcu_read_lock();
450         c_ops = rcu_dereference(bp->cnic_ops);
451         if (c_ops) {
452                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
453                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
454
455                         bnapi->cnic_tag = bnapi->last_status_idx;
456                 }
457                 info.cmd = CNIC_CTL_START_CMD;
458                 c_ops->cnic_ctl(bp->cnic_data, &info);
459         }
460         rcu_read_unlock();
461 }
462
463 #else
464
/* CNIC support not compiled in: no-op stub. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
469
/* CNIC support not compiled in: no-op stub. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
474
475 #endif
476
477 static int
478 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
479 {
480         u32 val1;
481         int i, ret;
482
483         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
484                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
485                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
486
487                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
488                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
489
490                 udelay(40);
491         }
492
493         val1 = (bp->phy_addr << 21) | (reg << 16) |
494                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
495                 BNX2_EMAC_MDIO_COMM_START_BUSY;
496         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
497
498         for (i = 0; i < 50; i++) {
499                 udelay(10);
500
501                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
502                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
503                         udelay(5);
504
505                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
506                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
507
508                         break;
509                 }
510         }
511
512         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
513                 *val = 0x0;
514                 ret = -EBUSY;
515         }
516         else {
517                 *val = val1;
518                 ret = 0;
519         }
520
521         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
522                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
523                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
524
525                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
526                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
527
528                 udelay(40);
529         }
530
531         return ret;
532 }
533
534 static int
535 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
536 {
537         u32 val1;
538         int i, ret;
539
540         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
541                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
542                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
543
544                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
545                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
546
547                 udelay(40);
548         }
549
550         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
551                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
552                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
553         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
554
555         for (i = 0; i < 50; i++) {
556                 udelay(10);
557
558                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
559                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
560                         udelay(5);
561                         break;
562                 }
563         }
564
565         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
566                 ret = -EBUSY;
567         else
568                 ret = 0;
569
570         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
571                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
572                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
573
574                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
575                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
576
577                 udelay(40);
578         }
579
580         return ret;
581 }
582
583 static void
584 bnx2_disable_int(struct bnx2 *bp)
585 {
586         int i;
587         struct bnx2_napi *bnapi;
588
589         for (i = 0; i < bp->irq_nvecs; i++) {
590                 bnapi = &bp->bnx2_napi[i];
591                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
592                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
593         }
594         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
595 }
596
597 static void
598 bnx2_enable_int(struct bnx2 *bp)
599 {
600         int i;
601         struct bnx2_napi *bnapi;
602
603         for (i = 0; i < bp->irq_nvecs; i++) {
604                 bnapi = &bp->bnx2_napi[i];
605
606                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
607                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
608                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
609                        bnapi->last_status_idx);
610
611                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
612                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
613                        bnapi->last_status_idx);
614         }
615         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
616 }
617
618 static void
619 bnx2_disable_int_sync(struct bnx2 *bp)
620 {
621         int i;
622
623         atomic_inc(&bp->intr_sem);
624         if (!netif_running(bp->dev))
625                 return;
626
627         bnx2_disable_int(bp);
628         for (i = 0; i < bp->irq_nvecs; i++)
629                 synchronize_irq(bp->irq_tbl[i].vector);
630 }
631
632 static void
633 bnx2_napi_disable(struct bnx2 *bp)
634 {
635         int i;
636
637         for (i = 0; i < bp->irq_nvecs; i++)
638                 napi_disable(&bp->bnx2_napi[i].napi);
639 }
640
641 static void
642 bnx2_napi_enable(struct bnx2 *bp)
643 {
644         int i;
645
646         for (i = 0; i < bp->irq_nvecs; i++)
647                 napi_enable(&bp->bnx2_napi[i].napi);
648 }
649
650 static void
651 bnx2_netif_stop(struct bnx2 *bp)
652 {
653         bnx2_cnic_stop(bp);
654         bnx2_disable_int_sync(bp);
655         if (netif_running(bp->dev)) {
656                 bnx2_napi_disable(bp);
657                 netif_tx_disable(bp->dev);
658                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
659         }
660 }
661
662 static void
663 bnx2_netif_start(struct bnx2 *bp)
664 {
665         if (atomic_dec_and_test(&bp->intr_sem)) {
666                 if (netif_running(bp->dev)) {
667                         netif_tx_wake_all_queues(bp->dev);
668                         bnx2_napi_enable(bp);
669                         bnx2_enable_int(bp);
670                         bnx2_cnic_start(bp);
671                 }
672         }
673 }
674
675 static void
676 bnx2_free_tx_mem(struct bnx2 *bp)
677 {
678         int i;
679
680         for (i = 0; i < bp->num_tx_rings; i++) {
681                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
682                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
683
684                 if (txr->tx_desc_ring) {
685                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
686                                             txr->tx_desc_ring,
687                                             txr->tx_desc_mapping);
688                         txr->tx_desc_ring = NULL;
689                 }
690                 kfree(txr->tx_buf_ring);
691                 txr->tx_buf_ring = NULL;
692         }
693 }
694
695 static void
696 bnx2_free_rx_mem(struct bnx2 *bp)
697 {
698         int i;
699
700         for (i = 0; i < bp->num_rx_rings; i++) {
701                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
702                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
703                 int j;
704
705                 for (j = 0; j < bp->rx_max_ring; j++) {
706                         if (rxr->rx_desc_ring[j])
707                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
708                                                     rxr->rx_desc_ring[j],
709                                                     rxr->rx_desc_mapping[j]);
710                         rxr->rx_desc_ring[j] = NULL;
711                 }
712                 vfree(rxr->rx_buf_ring);
713                 rxr->rx_buf_ring = NULL;
714
715                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
716                         if (rxr->rx_pg_desc_ring[j])
717                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
718                                                     rxr->rx_pg_desc_ring[j],
719                                                     rxr->rx_pg_desc_mapping[j]);
720                         rxr->rx_pg_desc_ring[j] = NULL;
721                 }
722                 vfree(rxr->rx_pg_ring);
723                 rxr->rx_pg_ring = NULL;
724         }
725 }
726
727 static int
728 bnx2_alloc_tx_mem(struct bnx2 *bp)
729 {
730         int i;
731
732         for (i = 0; i < bp->num_tx_rings; i++) {
733                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
734                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
735
736                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
737                 if (txr->tx_buf_ring == NULL)
738                         return -ENOMEM;
739
740                 txr->tx_desc_ring =
741                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
742                                              &txr->tx_desc_mapping);
743                 if (txr->tx_desc_ring == NULL)
744                         return -ENOMEM;
745         }
746         return 0;
747 }
748
749 static int
750 bnx2_alloc_rx_mem(struct bnx2 *bp)
751 {
752         int i;
753
754         for (i = 0; i < bp->num_rx_rings; i++) {
755                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
756                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
757                 int j;
758
759                 rxr->rx_buf_ring =
760                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
761                 if (rxr->rx_buf_ring == NULL)
762                         return -ENOMEM;
763
764                 memset(rxr->rx_buf_ring, 0,
765                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
766
767                 for (j = 0; j < bp->rx_max_ring; j++) {
768                         rxr->rx_desc_ring[j] =
769                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
770                                                      &rxr->rx_desc_mapping[j]);
771                         if (rxr->rx_desc_ring[j] == NULL)
772                                 return -ENOMEM;
773
774                 }
775
776                 if (bp->rx_pg_ring_size) {
777                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
778                                                   bp->rx_max_pg_ring);
779                         if (rxr->rx_pg_ring == NULL)
780                                 return -ENOMEM;
781
782                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
783                                bp->rx_max_pg_ring);
784                 }
785
786                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
787                         rxr->rx_pg_desc_ring[j] =
788                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
789                                                 &rxr->rx_pg_desc_mapping[j]);
790                         if (rxr->rx_pg_desc_ring[j] == NULL)
791                                 return -ENOMEM;
792
793                 }
794         }
795         return 0;
796 }
797
798 static void
799 bnx2_free_mem(struct bnx2 *bp)
800 {
801         int i;
802         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
803
804         bnx2_free_tx_mem(bp);
805         bnx2_free_rx_mem(bp);
806
807         for (i = 0; i < bp->ctx_pages; i++) {
808                 if (bp->ctx_blk[i]) {
809                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
810                                             bp->ctx_blk[i],
811                                             bp->ctx_blk_mapping[i]);
812                         bp->ctx_blk[i] = NULL;
813                 }
814         }
815         if (bnapi->status_blk.msi) {
816                 pci_free_consistent(bp->pdev, bp->status_stats_size,
817                                     bnapi->status_blk.msi,
818                                     bp->status_blk_mapping);
819                 bnapi->status_blk.msi = NULL;
820                 bp->stats_blk = NULL;
821         }
822 }
823
/* Allocate all device memory.
 *
 * The status block(s) and the statistics block share one coherent DMA
 * allocation: [status blocks | statistics].  With MSI-X capability the
 * status area is sized for BNX2_MAX_MSIX_HW_VEC aligned sub-blocks and
 * each vector's bnx2_napi is pointed at its own slice.  5709 chips
 * additionally need 8KB of host-resident context pages.
 *
 * Returns 0 or -ENOMEM; on failure everything already allocated is
 * released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block. */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* Vectors 1..N each get an aligned slice of the block. */
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        /* Vector number field used in INT_ACK_CMD writes. */
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics live right after the status area. */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 8KB of host context memory, in page-sized chunks. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
900
901 static void
902 bnx2_report_fw_link(struct bnx2 *bp)
903 {
904         u32 fw_link_status = 0;
905
906         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
907                 return;
908
909         if (bp->link_up) {
910                 u32 bmsr;
911
912                 switch (bp->line_speed) {
913                 case SPEED_10:
914                         if (bp->duplex == DUPLEX_HALF)
915                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
916                         else
917                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
918                         break;
919                 case SPEED_100:
920                         if (bp->duplex == DUPLEX_HALF)
921                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
922                         else
923                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
924                         break;
925                 case SPEED_1000:
926                         if (bp->duplex == DUPLEX_HALF)
927                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
928                         else
929                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
930                         break;
931                 case SPEED_2500:
932                         if (bp->duplex == DUPLEX_HALF)
933                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
934                         else
935                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
936                         break;
937                 }
938
939                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
940
941                 if (bp->autoneg) {
942                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
943
944                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
945                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
946
947                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
948                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
949                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
950                         else
951                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
952                 }
953         }
954         else
955                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
956
957         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
958 }
959
960 static char *
961 bnx2_xceiver_str(struct bnx2 *bp)
962 {
963         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
964                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
965                  "Copper"));
966 }
967
/* Log the link state transition, update the carrier flag, and forward
 * the new state to the firmware.  The multi-part printk sequence builds
 * a single log line: calls after the first carry no KERN_ level and so
 * continue the same line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Append the negotiated pause directions, if any. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				/* flow_ctrl nonzero without RX implies TX. */
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1004
1005 static void
1006 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1007 {
1008         u32 local_adv, remote_adv;
1009
1010         bp->flow_ctrl = 0;
1011         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1012                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1013
1014                 if (bp->duplex == DUPLEX_FULL) {
1015                         bp->flow_ctrl = bp->req_flow_ctrl;
1016                 }
1017                 return;
1018         }
1019
1020         if (bp->duplex != DUPLEX_FULL) {
1021                 return;
1022         }
1023
1024         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1025             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1026                 u32 val;
1027
1028                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1029                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1030                         bp->flow_ctrl |= FLOW_CTRL_TX;
1031                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1032                         bp->flow_ctrl |= FLOW_CTRL_RX;
1033                 return;
1034         }
1035
1036         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1037         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1038
1039         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1040                 u32 new_local_adv = 0;
1041                 u32 new_remote_adv = 0;
1042
1043                 if (local_adv & ADVERTISE_1000XPAUSE)
1044                         new_local_adv |= ADVERTISE_PAUSE_CAP;
1045                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1046                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
1047                 if (remote_adv & ADVERTISE_1000XPAUSE)
1048                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
1049                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1050                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1051
1052                 local_adv = new_local_adv;
1053                 remote_adv = new_remote_adv;
1054         }
1055
1056         /* See Table 28B-3 of 802.3ab-1999 spec. */
1057         if (local_adv & ADVERTISE_PAUSE_CAP) {
1058                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1059                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1060                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1061                         }
1062                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1063                                 bp->flow_ctrl = FLOW_CTRL_RX;
1064                         }
1065                 }
1066                 else {
1067                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1068                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1069                         }
1070                 }
1071         }
1072         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1073                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1074                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1075
1076                         bp->flow_ctrl = FLOW_CTRL_TX;
1077                 }
1078         }
1079 }
1080
1081 static int
1082 bnx2_5709s_linkup(struct bnx2 *bp)
1083 {
1084         u32 val, speed;
1085
1086         bp->link_up = 1;
1087
1088         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1089         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1090         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1091
1092         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1093                 bp->line_speed = bp->req_line_speed;
1094                 bp->duplex = bp->req_duplex;
1095                 return 0;
1096         }
1097         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1098         switch (speed) {
1099                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1100                         bp->line_speed = SPEED_10;
1101                         break;
1102                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1103                         bp->line_speed = SPEED_100;
1104                         break;
1105                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1106                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1107                         bp->line_speed = SPEED_1000;
1108                         break;
1109                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1110                         bp->line_speed = SPEED_2500;
1111                         break;
1112         }
1113         if (val & MII_BNX2_GP_TOP_AN_FD)
1114                 bp->duplex = DUPLEX_FULL;
1115         else
1116                 bp->duplex = DUPLEX_HALF;
1117         return 0;
1118 }
1119
1120 static int
1121 bnx2_5708s_linkup(struct bnx2 *bp)
1122 {
1123         u32 val;
1124
1125         bp->link_up = 1;
1126         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1127         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1128                 case BCM5708S_1000X_STAT1_SPEED_10:
1129                         bp->line_speed = SPEED_10;
1130                         break;
1131                 case BCM5708S_1000X_STAT1_SPEED_100:
1132                         bp->line_speed = SPEED_100;
1133                         break;
1134                 case BCM5708S_1000X_STAT1_SPEED_1G:
1135                         bp->line_speed = SPEED_1000;
1136                         break;
1137                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1138                         bp->line_speed = SPEED_2500;
1139                         break;
1140         }
1141         if (val & BCM5708S_1000X_STAT1_FD)
1142                 bp->duplex = DUPLEX_FULL;
1143         else
1144                 bp->duplex = DUPLEX_HALF;
1145
1146         return 0;
1147 }
1148
1149 static int
1150 bnx2_5706s_linkup(struct bnx2 *bp)
1151 {
1152         u32 bmcr, local_adv, remote_adv, common;
1153
1154         bp->link_up = 1;
1155         bp->line_speed = SPEED_1000;
1156
1157         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1158         if (bmcr & BMCR_FULLDPLX) {
1159                 bp->duplex = DUPLEX_FULL;
1160         }
1161         else {
1162                 bp->duplex = DUPLEX_HALF;
1163         }
1164
1165         if (!(bmcr & BMCR_ANENABLE)) {
1166                 return 0;
1167         }
1168
1169         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1170         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1171
1172         common = local_adv & remote_adv;
1173         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1174
1175                 if (common & ADVERTISE_1000XFULL) {
1176                         bp->duplex = DUPLEX_FULL;
1177                 }
1178                 else {
1179                         bp->duplex = DUPLEX_HALF;
1180                 }
1181         }
1182
1183         return 0;
1184 }
1185
1186 static int
1187 bnx2_copper_linkup(struct bnx2 *bp)
1188 {
1189         u32 bmcr;
1190
1191         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1192         if (bmcr & BMCR_ANENABLE) {
1193                 u32 local_adv, remote_adv, common;
1194
1195                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1196                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1197
1198                 common = local_adv & (remote_adv >> 2);
1199                 if (common & ADVERTISE_1000FULL) {
1200                         bp->line_speed = SPEED_1000;
1201                         bp->duplex = DUPLEX_FULL;
1202                 }
1203                 else if (common & ADVERTISE_1000HALF) {
1204                         bp->line_speed = SPEED_1000;
1205                         bp->duplex = DUPLEX_HALF;
1206                 }
1207                 else {
1208                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1209                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1210
1211                         common = local_adv & remote_adv;
1212                         if (common & ADVERTISE_100FULL) {
1213                                 bp->line_speed = SPEED_100;
1214                                 bp->duplex = DUPLEX_FULL;
1215                         }
1216                         else if (common & ADVERTISE_100HALF) {
1217                                 bp->line_speed = SPEED_100;
1218                                 bp->duplex = DUPLEX_HALF;
1219                         }
1220                         else if (common & ADVERTISE_10FULL) {
1221                                 bp->line_speed = SPEED_10;
1222                                 bp->duplex = DUPLEX_FULL;
1223                         }
1224                         else if (common & ADVERTISE_10HALF) {
1225                                 bp->line_speed = SPEED_10;
1226                                 bp->duplex = DUPLEX_HALF;
1227                         }
1228                         else {
1229                                 bp->line_speed = 0;
1230                                 bp->link_up = 0;
1231                         }
1232                 }
1233         }
1234         else {
1235                 if (bmcr & BMCR_SPEED100) {
1236                         bp->line_speed = SPEED_100;
1237                 }
1238                 else {
1239                         bp->line_speed = SPEED_10;
1240                 }
1241                 if (bmcr & BMCR_FULLDPLX) {
1242                         bp->duplex = DUPLEX_FULL;
1243                 }
1244                 else {
1245                         bp->duplex = DUPLEX_HALF;
1246                 }
1247         }
1248
1249         return 0;
1250 }
1251
/* Program the L2 context type word for one rx ring context.  On the
 * 5709 this also encodes the rx flow-control watermarks: the low water
 * mark arms pause frames when tx pause is enabled, and both marks are
 * scaled and clamped to fit their context-word fields.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* Magic field value from Broadcom; meaning not documented here —
	 * TODO confirm against the 5709 programming guide.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low water mark only matters when we generate pause
		 * frames; otherwise use the "disabled" value.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Marks must be ordered lo < hi; otherwise drop lo. */
		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; a zero hi mark disables lo. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1287
1288 static void
1289 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1290 {
1291         int i;
1292         u32 cid;
1293
1294         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1295                 if (i == 1)
1296                         cid = RX_RSS_CID;
1297                 bnx2_init_rx_context(bp, cid);
1298         }
1299 }
1300
/* Program the EMAC to match the current link parameters: port mode for
 * the negotiated speed, duplex, and rx/tx pause enables.  Also acks the
 * link-change interrupt, and on the 5709 re-initializes the rx contexts
 * because their flow-control watermarks depend on bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX length/IPG values; magic constants from Broadcom.  The
	 * 0x26ff variant appears specific to 1G half duplex — TODO
	 * confirm against the EMAC register documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it falls
				 * through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII with the 25G mode bit set. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1368
1369 static void
1370 bnx2_enable_bmsr1(struct bnx2 *bp)
1371 {
1372         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1373             (CHIP_NUM(bp) == CHIP_NUM_5709))
1374                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1375                                MII_BNX2_BLK_ADDR_GP_STATUS);
1376 }
1377
1378 static void
1379 bnx2_disable_bmsr1(struct bnx2 *bp)
1380 {
1381         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1382             (CHIP_NUM(bp) == CHIP_NUM_5709))
1383                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1384                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1385 }
1386
1387 static int
1388 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1389 {
1390         u32 up1;
1391         int ret = 1;
1392
1393         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1394                 return 0;
1395
1396         if (bp->autoneg & AUTONEG_SPEED)
1397                 bp->advertising |= ADVERTISED_2500baseX_Full;
1398
1399         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1400                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1401
1402         bnx2_read_phy(bp, bp->mii_up1, &up1);
1403         if (!(up1 & BCM5708S_UP1_2G5)) {
1404                 up1 |= BCM5708S_UP1_2G5;
1405                 bnx2_write_phy(bp, bp->mii_up1, up1);
1406                 ret = 0;
1407         }
1408
1409         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1410                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1411                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1412
1413         return ret;
1414 }
1415
1416 static int
1417 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1418 {
1419         u32 up1;
1420         int ret = 0;
1421
1422         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1423                 return 0;
1424
1425         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1426                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1427
1428         bnx2_read_phy(bp, bp->mii_up1, &up1);
1429         if (up1 & BCM5708S_UP1_2G5) {
1430                 up1 &= ~BCM5708S_UP1_2G5;
1431                 bnx2_write_phy(bp, bp->mii_up1, up1);
1432                 ret = 1;
1433         }
1434
1435         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1436                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1437                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1438
1439         return ret;
1440 }
1441
1442 static void
1443 bnx2_enable_forced_2g5(struct bnx2 *bp)
1444 {
1445         u32 bmcr;
1446
1447         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1448                 return;
1449
1450         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1451                 u32 val;
1452
1453                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1455                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1456                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1457                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1458                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1459
1460                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1461                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1462                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1463
1464         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1465                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1466                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1467         }
1468
1469         if (bp->autoneg & AUTONEG_SPEED) {
1470                 bmcr &= ~BMCR_ANENABLE;
1471                 if (bp->req_duplex == DUPLEX_FULL)
1472                         bmcr |= BMCR_FULLDPLX;
1473         }
1474         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1475 }
1476
1477 static void
1478 bnx2_disable_forced_2g5(struct bnx2 *bp)
1479 {
1480         u32 bmcr;
1481
1482         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483                 return;
1484
1485         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486                 u32 val;
1487
1488                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1490                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1491                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1492                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493
1494                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1495                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1496                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1497
1498         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1499                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1500                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1501         }
1502
1503         if (bp->autoneg & AUTONEG_SPEED)
1504                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1505         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1506 }
1507
1508 static void
1509 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1510 {
1511         u32 val;
1512
1513         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1514         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1515         if (start)
1516                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1517         else
1518                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1519 }
1520
/* Re-evaluate the link state from the PHY and program the MAC to
 * match.  Reports a transition via bnx2_report_link() and always
 * reconfigures the MAC at the end.  Returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware-managed (remote) PHY: nothing to do here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* bmsr1 is latched; read twice for the current value.  On 5709
	 * SerDes it lives in an alternate register block, selected and
	 * restored by the enable/disable helpers.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link status from the EMAC
	 * status register and the AN debug shadow register instead of
	 * trusting BMSR alone.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			/* Release a previously forced-down link. */
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* The AN debug register is also read twice (latched). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G state so autoneg can
		 * run, and re-enable autoneg if we had parallel-detected.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1604
1605 static int
1606 bnx2_reset_phy(struct bnx2 *bp)
1607 {
1608         int i;
1609         u32 reg;
1610
1611         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1612
1613 #define PHY_RESET_MAX_WAIT 100
1614         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1615                 udelay(10);
1616
1617                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1618                 if (!(reg & BMCR_RESET)) {
1619                         udelay(20);
1620                         break;
1621                 }
1622         }
1623         if (i == PHY_RESET_MAX_WAIT) {
1624                 return -EBUSY;
1625         }
1626         return 0;
1627 }
1628
1629 static u32
1630 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1631 {
1632         u32 adv = 0;
1633
1634         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1635                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1636
1637                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1638                         adv = ADVERTISE_1000XPAUSE;
1639                 }
1640                 else {
1641                         adv = ADVERTISE_PAUSE_CAP;
1642                 }
1643         }
1644         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1645                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1646                         adv = ADVERTISE_1000XPSE_ASYM;
1647                 }
1648                 else {
1649                         adv = ADVERTISE_PAUSE_ASYM;
1650                 }
1651         }
1652         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1653                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1654                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1655                 }
1656                 else {
1657                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1658                 }
1659         }
1660         return adv;
1661 }
1662
1663 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1664
1665 static int
1666 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1667 __releases(&bp->phy_lock)
1668 __acquires(&bp->phy_lock)
1669 {
1670         u32 speed_arg = 0, pause_adv;
1671
1672         pause_adv = bnx2_phy_get_pause_adv(bp);
1673
1674         if (bp->autoneg & AUTONEG_SPEED) {
1675                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1676                 if (bp->advertising & ADVERTISED_10baseT_Half)
1677                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1678                 if (bp->advertising & ADVERTISED_10baseT_Full)
1679                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1680                 if (bp->advertising & ADVERTISED_100baseT_Half)
1681                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1682                 if (bp->advertising & ADVERTISED_100baseT_Full)
1683                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1684                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1685                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1686                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1687                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1688         } else {
1689                 if (bp->req_line_speed == SPEED_2500)
1690                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1691                 else if (bp->req_line_speed == SPEED_1000)
1692                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693                 else if (bp->req_line_speed == SPEED_100) {
1694                         if (bp->req_duplex == DUPLEX_FULL)
1695                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1696                         else
1697                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1698                 } else if (bp->req_line_speed == SPEED_10) {
1699                         if (bp->req_duplex == DUPLEX_FULL)
1700                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1701                         else
1702                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1703                 }
1704         }
1705
1706         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1707                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1708         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1709                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1710
1711         if (port == PORT_TP)
1712                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1713                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1714
1715         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1716
1717         spin_unlock_bh(&bp->phy_lock);
1718         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1719         spin_lock_bh(&bp->phy_lock);
1720
1721         return 0;
1722 }
1723
/* Program the SerDes PHY according to bp->autoneg / bp->req_line_speed /
 * bp->advertising.  Called with bp->phy_lock held; the lock is dropped
 * and reacquired around the msleep() in the autoneg restart path.
 * Remote-PHY capable devices are delegated to the firmware instead.
 * Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware owns the PHY on remote-PHY capable devices. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: disable autoneg and force 1G or 2.5G. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on/off requires a link
		 * bounce so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 looks like the 5709
				 * forced-2.5G speed bit in BMCR — confirm
				 * against the 5709 register documentation.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Clear 1G advertisement and restart autoneg
				 * briefly so the partner sees the link drop
				 * before the new forced settings take effect.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* No register change needed; just refresh MAC state. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the BH spinlock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1840
/* Ethtool advertisement mask for fibre ports; includes 2.5G only when
 * the PHY is 2.5G capable.  NOTE: expands to an expression that reads
 * a local variable named 'bp' — only usable where 'bp' is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask for copper ports (all 10/100/1000 modes). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement register bits for every 10/100 mode plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for both 1G duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1855
1856 static void
1857 bnx2_set_default_remote_link(struct bnx2 *bp)
1858 {
1859         u32 link;
1860
1861         if (bp->phy_port == PORT_TP)
1862                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1863         else
1864                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1865
1866         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1867                 bp->req_line_speed = 0;
1868                 bp->autoneg |= AUTONEG_SPEED;
1869                 bp->advertising = ADVERTISED_Autoneg;
1870                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1871                         bp->advertising |= ADVERTISED_10baseT_Half;
1872                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1873                         bp->advertising |= ADVERTISED_10baseT_Full;
1874                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1875                         bp->advertising |= ADVERTISED_100baseT_Half;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1877                         bp->advertising |= ADVERTISED_100baseT_Full;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1879                         bp->advertising |= ADVERTISED_1000baseT_Full;
1880                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1881                         bp->advertising |= ADVERTISED_2500baseX_Full;
1882         } else {
1883                 bp->autoneg = 0;
1884                 bp->advertising = 0;
1885                 bp->req_duplex = DUPLEX_FULL;
1886                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1887                         bp->req_line_speed = SPEED_10;
1888                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1889                                 bp->req_duplex = DUPLEX_HALF;
1890                 }
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1892                         bp->req_line_speed = SPEED_100;
1893                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1894                                 bp->req_duplex = DUPLEX_HALF;
1895                 }
1896                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1897                         bp->req_line_speed = SPEED_1000;
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1899                         bp->req_line_speed = SPEED_2500;
1900         }
1901 }
1902
1903 static void
1904 bnx2_set_default_link(struct bnx2 *bp)
1905 {
1906         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1907                 bnx2_set_default_remote_link(bp);
1908                 return;
1909         }
1910
1911         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1912         bp->req_line_speed = 0;
1913         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1914                 u32 reg;
1915
1916                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1917
1918                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1919                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1920                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1921                         bp->autoneg = 0;
1922                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1923                         bp->req_duplex = DUPLEX_FULL;
1924                 }
1925         } else
1926                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1927 }
1928
/* Advance the driver pulse sequence number and write it to the firmware
 * pulse mailbox so the bootcode knows the driver is alive.  Open-codes
 * the shared-memory write through the PCI register window, serialized
 * by bp->indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	/* Only the low BNX2_DRV_PULSE_SEQ_MASK bits of the sequence count. */
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	/* Window address must be set before the data write. */
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1942
/* Handle a link-status event from the firmware-managed (remote) PHY:
 * read BNX2_LINK_STATUS from shared memory, update bp->link_up, speed,
 * duplex, flow control and port type, then reprogram the MAC and report
 * any link transition.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware asks for a heartbeat when its timer expired. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The half-duplex cases set duplex and then fall through to
		 * the matching full-duplex case to set the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control was forced, not negotiated. */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			/* Take the negotiated result reported by firmware. */
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change requires reloading the defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2019
2020 static int
2021 bnx2_set_remote_link(struct bnx2 *bp)
2022 {
2023         u32 evt_code;
2024
2025         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2026         switch (evt_code) {
2027                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2028                         bnx2_remote_phy_event(bp);
2029                         break;
2030                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2031                 default:
2032                         bnx2_send_heart_beat(bp);
2033                         break;
2034         }
2035         return 0;
2036 }
2037
/* Program a copper PHY according to bp->autoneg / bp->advertising or the
 * forced bp->req_line_speed / bp->req_duplex.  Called with bp->phy_lock
 * held; the lock is dropped and reacquired around the msleep() used to
 * force a visible link-down.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: rebuild the 10/100 and 1000 advertisement
		 * registers and restart autoneg only if something changed.
		 */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage so the comparison below is
		 * meaningful.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the BH lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2136
2137 static int
2138 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 __releases(&bp->phy_lock)
2140 __acquires(&bp->phy_lock)
2141 {
2142         if (bp->loopback == MAC_LOOPBACK)
2143                 return 0;
2144
2145         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146                 return (bnx2_setup_serdes_phy(bp, port));
2147         }
2148         else {
2149                 return (bnx2_setup_copper_phy(bp));
2150         }
2151 }
2152
/* Initialize the 5709 SerDes PHY.  The 5709 maps the standard MII
 * registers at an offset of 0x10 and uses a block-address scheme
 * (MII_BNX2_BLK_ADDR selects which register block subsequent accesses
 * hit), so most writes below come in select-block / program pairs.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode, disable auto media detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the hardware supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2202
/* Initialize the BCM5708S SerDes PHY: fiber mode with auto-detect,
 * PLL early-lock detect, optional 2.5G advertisement, plus revision-
 * and board-specific TX signal adjustments.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G only when the hardware supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a stronger TX signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from the HW config,
	 * but only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2260
/* Initialize the 5706 SerDes PHY.  The raw 0x18/0x1c register writes
 * below use vendor shadow registers; the exact values come from
 * Broadcom and differ for jumbo (mtu > 1500) vs standard frames.
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length configuration. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2298
/* Initialize the copper PHY: apply errata workarounds (CRC fix, early
 * DAC disable), configure extended packet length for jumbo frames, and
 * enable ethernet@wirespeed.  The raw 0x10/0x15/0x17/0x18 accesses use
 * vendor shadow/expansion registers with values supplied by Broadcom.
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Errata workaround sequence via the expansion registers
	 * (0x17 = address, 0x15 = data).
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expand register 0x8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length configuration. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2350
2351
/* (Re)initialize the PHY: set the default MII register map, read the
 * PHY ID, run the chip-specific init helper, then apply the current
 * link configuration.  Called with bp->phy_lock held; bnx2_setup_phy()
 * may drop and reacquire it.  Returns 0 or an error from the helpers.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default MII register map; bnx2_init_5709s_phy() overrides it. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Firmware owns the PHY on remote-PHY capable devices; no local
	 * PHY access is needed.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2397
2398 static int
2399 bnx2_set_mac_loopback(struct bnx2 *bp)
2400 {
2401         u32 mac_mode;
2402
2403         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2404         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2405         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407         bp->link_up = 1;
2408         return 0;
2409 }
2410
2411 static int bnx2_test_link(struct bnx2 *);
2412
/* Put the external PHY into loopback at 1G full duplex for the loopback
 * self-test, wait up to 1 second for the link to come up, then switch
 * the EMAC to GMII mode.  Returns 0 on success or the error from the
 * initial PHY write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link: up to 10 tries, 100 ms apart.  A timeout is not
	 * treated as fatal here; the test proceeds regardless.
	 */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC loopback / forced-link bits and select the GMII port. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2442
/* Send a command to the bootcode through the driver mailbox and, when
 * 'ack' is set, wait for the firmware to echo our sequence number back.
 * A new sequence number is OR'd into msg_data each call.  On timeout a
 * FW_TIMEOUT code is posted (logged unless 'silent') and -EBUSY is
 * returned; a non-OK firmware status returns -EIO; otherwise 0.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* Firmware acks by reflecting our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 commands do not report a completion status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2488
/* Initialize the 5709 on-chip context memory: trigger the hardware
 * memory init, then program the host page table with the DMA address of
 * each pre-allocated context block in bp->ctx_blk[].  Returns 0, or
 * -EBUSY if the hardware does not complete an operation in time, or
 * -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit 12 in BNX2_CTX_COMMAND is undocumented here;
	 * confirm its meaning against the 5709 programming guide.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size (log2, relative to 256 bytes). */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait (up to 10 * 2 us) for the hardware memory init to finish. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write one page-table entry: low DMA address + valid bit,
		 * high DMA address, then the index with the write request.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware clears the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2536
/* Zero the on-chip context memory for all 96 contexts, one
 * PHY_CTX_SIZE chunk at a time, through the context view window
 * (CTX_VIRT_ADDR / CTX_PAGE_TBL).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0: virtual CIDs with bit 3 set are mapped
			 * to a different physical CID range — presumably a
			 * workaround for this silicon stepping; TODO confirm
			 * against the chip errata.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Point the view window at this context page. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2579
2580 static int
2581 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2582 {
2583         u16 *good_mbuf;
2584         u32 good_mbuf_cnt;
2585         u32 val;
2586
2587         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2588         if (good_mbuf == NULL) {
2589                 printk(KERN_ERR PFX "Failed to allocate memory in "
2590                                     "bnx2_alloc_bad_rbuf\n");
2591                 return -ENOMEM;
2592         }
2593
2594         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2595                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2596
2597         good_mbuf_cnt = 0;
2598
2599         /* Allocate a bunch of mbufs and save the good ones in an array. */
2600         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2601         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2602                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2603                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2604
2605                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2606
2607                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2608
2609                 /* The addresses with Bit 9 set are bad memory blocks. */
2610                 if (!(val & (1 << 9))) {
2611                         good_mbuf[good_mbuf_cnt] = (u16) val;
2612                         good_mbuf_cnt++;
2613                 }
2614
2615                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2616         }
2617
2618         /* Free the good ones back to the mbuf pool thus discarding
2619          * all the bad ones. */
2620         while (good_mbuf_cnt) {
2621                 good_mbuf_cnt--;
2622
2623                 val = good_mbuf[good_mbuf_cnt];
2624                 val = (val << 9) | val | 1;
2625
2626                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2627         }
2628         kfree(good_mbuf);
2629         return 0;
2630 }
2631
2632 static void
2633 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2634 {
2635         u32 val;
2636
2637         val = (mac_addr[0] << 8) | mac_addr[1];
2638
2639         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2640
2641         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2642                 (mac_addr[4] << 8) | mac_addr[5];
2643
2644         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2645 }
2646
/* Allocate a page for RX page-ring slot @index, DMA-map it, and fill
 * the corresponding rx_bd with its 64-bit bus address.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails, or
 * -EIO if the DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Split the bus address across the two 32-bit halves of the BD. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2671
2672 static void
2673 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2674 {
2675         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2676         struct page *page = rx_pg->page;
2677
2678         if (!page)
2679                 return;
2680
2681         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2682                        PCI_DMA_FROMDEVICE);
2683
2684         __free_page(page);
2685         rx_pg->page = NULL;
2686 }
2687
/* Allocate and DMA-map an skb for RX ring slot @index and point the
 * corresponding rx_bd at it.  Also advances rx_prod_bseq by the
 * buffer size to account for the new buffer.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, or -EIO
 * if the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary before mapping. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the bus address across the two 32-bit halves of the BD. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2722
2723 static int
2724 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2725 {
2726         struct status_block *sblk = bnapi->status_blk.msi;
2727         u32 new_link_state, old_link_state;
2728         int is_set = 1;
2729
2730         new_link_state = sblk->status_attn_bits & event;
2731         old_link_state = sblk->status_attn_bits_ack & event;
2732         if (new_link_state != old_link_state) {
2733                 if (new_link_state)
2734                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2735                 else
2736                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2737         } else
2738                 is_set = 0;
2739
2740         return is_set;
2741 }
2742
2743 static void
2744 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2745 {
2746         spin_lock(&bp->phy_lock);
2747
2748         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2749                 bnx2_set_link(bp);
2750         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2751                 bnx2_set_remote_link(bp);
2752
2753         spin_unlock(&bp->phy_lock);
2754
2755 }
2756
2757 static inline u16
2758 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2759 {
2760         u16 cons;
2761
2762         /* Tell compiler that status block fields can change. */
2763         barrier();
2764         cons = *bnapi->hw_tx_cons_ptr;
2765         barrier();
2766         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2767                 cons++;
2768         return cons;
2769 }
2770
/* Reclaim completed TX descriptors for this NAPI instance's ring:
 * unmap and free each completed skb, advance the software consumer,
 * and wake the netdev queue if it was stopped and enough descriptors
 * are now free.  Returns the number of packets reclaimed (at most
 * @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One netdev TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Compute the index of the packet's last BD; if it
			 * has not been consumed yet, stop — the packet is
			 * only partially completed.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Skip the fragment BDs plus the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read hw_cons in case more completions arrived. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the TX lock to avoid racing with a
		 * concurrent stop in bnx2_start_xmit().
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2853
/* Recycle @count page-ring entries from the consumer side back to the
 * producer side without allocating new pages, moving page pointers,
 * DMA mappings, and BD addresses from consumer slots to producer
 * slots.
 *
 * If @skb is non-NULL, the caller failed to replace the last page in
 * the skb's frags array: that page is detached from the skb, put back
 * into the consumer ring slot, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Only move the page when the slots actually differ. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2909
/* Recycle the skb from ring slot @cons into producer slot @prod
 * instead of allocating a fresh buffer (used on allocation failure or
 * after a copy-break).  The DMA mapping and BD address move with the
 * skb, and rx_prod_bseq is advanced for the reposted buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the synced header area back to the device before reuse. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2939
/* Finish receiving a packet into @skb.
 *
 * @len: packet length excluding the trailing 4-byte frame checksum.
 * @hdr_len: for split/jumbo frames, the number of header bytes kept
 *	in the linear part; 0 means the whole frame is linear.
 * @dma_addr: DMA mapping of the skb data (unmapped here).
 * @ring_idx: consumer ring index in the high 16 bits, producer index
 *	in the low 16 bits.
 *
 * The ring slot is first replenished with a fresh skb; on failure the
 * old skb (and any page-ring pages the frame would have used) are
 * recycled and an error is returned.  For split frames, pages from
 * the page ring are attached to the skb as fragments.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Also recycle the page-ring pages this split
			 * frame occupied (raw length includes the FCS).
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire frame fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Bytes beyond the header live in page-ring pages;
		 * frag_size still includes the 4-byte FCS.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only (part of) the trailing FCS remains:
				 * trim those bytes from what was already
				 * attached and recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3038
3039 static inline u16
3040 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3041 {
3042         u16 cons;
3043
3044         /* Tell compiler that status block fields can change. */
3045         barrier();
3046         cons = *bnapi->hw_rx_cons_ptr;
3047         barrier();
3048         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3049                 cons++;
3050         return cons;
3051 }
3052
/* Process up to @budget received packets for this NAPI instance:
 * validate each frame, deliver it up the stack (copying small frames,
 * attaching page frags for split/jumbo frames), replenish or recycle
 * ring buffers, and finally post the new producer indices to the
 * chip.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; the rest
		 * is handled by bnx2_rx_skb() or the copy-break path.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr to the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling their buffers. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing 4-byte frame checksum. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Copy-break: copy small frames into a fresh skb
			 * and recycle the original ring buffer.
			 */
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN accel group: re-insert the tag
				 * into the packet data by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged
		 * (0x8100 = ETH_P_8021Q).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust hardware checksum only when no checksum
			 * error bits are set.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Post the new producer indices and byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3228
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts while NAPI polling is in progress. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3251
/* One-shot MSI ISR.  Unlike bnx2_msi(), no MASK_INT write is issued
 * here — presumably the one-shot MSI mode self-masks; see bnx2_msi().
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3268
/* INTx ISR (interrupt line may be shared).  Returns IRQ_NONE when
 * this device shows no new work: status index unchanged and the
 * INTA_VALUE bit set in MISC_STATUS.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts while NAPI polling is in progress. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only if NAPI was not already
	 * scheduled, then run the poll.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3307
3308 static inline int
3309 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3310 {
3311         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3312         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3313
3314         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3315             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3316                 return 1;
3317         return 0;
3318 }
3319
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Nonzero when this NAPI instance has anything to do: RX/TX ring
 * completions, a CNIC status-block update (status index moved past
 * the last recorded cnic_tag), or an unacknowledged attention event
 * (attention bits differing from their ack copy).
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3342
/* Called periodically to detect an MSI that the chip failed to deliver.
 * If work has been pending across two consecutive checks with no change
 * in the status index, the MSI is assumed lost: the MSI enable bit is
 * pulsed off and back on, and the handler is invoked directly.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        /* Toggle the MSI enable bit, then run the MSI
                         * handler by hand to recover.
                         */
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        /* Remember the index seen this round for the next comparison. */
        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3364
#ifdef BCM_CNIC
/* Hand the current status block to the CNIC driver's handler, if one is
 * registered.  cnic_ops is RCU-protected; cnic_tag records the status
 * index the CNIC driver has processed (compared in bnx2_has_work()).
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
#endif
3381
/* Service pending attention events (link change / timer abort).  An
 * event is pending when its bit differs between status_attn_bits and
 * status_attn_bits_ack.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                /* Read back to flush the coalesce-now command. */
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
3401
3402 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3403                           int work_done, int budget)
3404 {
3405         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3406         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3407
3408         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3409                 bnx2_tx_int(bp, bnapi, 0);
3410
3411         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3412                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3413
3414         return work_done;
3415 }
3416
/* NAPI poll handler for non-zero MSI-X vectors.  Only fast-path RX/TX
 * work is serviced here; link and CNIC events are polled by bnx2_poll()
 * on vector 0.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        /* All caught up: leave polling and ack the last
                         * processed status index for this vector.
                         */
                        napi_complete(napi);
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3443
/* Main NAPI poll handler (INTx, MSI, and MSI-X vector 0).  Handles link
 * attention, fast-path RX/TX, and CNIC events, then either stays in the
 * loop (more work) or completes NAPI and acks the status index.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                /* MSI/MSI-X: a single ack suffices. */
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: ack with interrupts masked first, then
                         * unmasked, as two separate writes.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3492
3493 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3494  * from set_multicast.
3495  */
/* Program the chip's RX filtering (promiscuous / all-multi / multicast
 * hash / unicast match registers) from the net_device's flags and
 * address lists, then commit the sort-mode register.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with the bits we may set cleared. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in hardware only while no vlan group is
         * registered.
         */
        if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept everything by filling the multicast hash. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Low CRC byte selects one of 256 hash bits:
                         * top 3 bits pick the register, low 5 the bit.
                         */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Too many secondary unicast addresses to match exactly: fall
         * back to promiscuous reception.
         */
        if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into the match filter list */
                i = 0;
                list_for_each_entry(ha, &dev->uc.list, list) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }

        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Commit sort mode: clear, write, then write again with enable. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3588
3589 static int __devinit
3590 check_fw_section(const struct firmware *fw,
3591                  const struct bnx2_fw_file_section *section,
3592                  u32 alignment, bool non_empty)
3593 {
3594         u32 offset = be32_to_cpu(section->offset);
3595         u32 len = be32_to_cpu(section->len);
3596
3597         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3598                 return -EINVAL;
3599         if ((non_empty && len == 0) || len > fw->size - offset ||
3600             len & (alignment - 1))
3601                 return -EINVAL;
3602         return 0;
3603 }
3604
3605 static int __devinit
3606 check_mips_fw_entry(const struct firmware *fw,
3607                     const struct bnx2_mips_fw_file_entry *entry)
3608 {
3609         if (check_fw_section(fw, &entry->text, 4, true) ||
3610             check_fw_section(fw, &entry->data, 4, false) ||
3611             check_fw_section(fw, &entry->rodata, 4, false))
3612                 return -EINVAL;
3613         return 0;
3614 }
3615
3616 static int __devinit
3617 bnx2_request_firmware(struct bnx2 *bp)
3618 {
3619         const char *mips_fw_file, *rv2p_fw_file;
3620         const struct bnx2_mips_fw_file *mips_fw;
3621         const struct bnx2_rv2p_fw_file *rv2p_fw;
3622         int rc;
3623
3624         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3625                 mips_fw_file = FW_MIPS_FILE_09;
3626                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3627                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3628                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3629                 else
3630                         rv2p_fw_file = FW_RV2P_FILE_09;
3631         } else {
3632                 mips_fw_file = FW_MIPS_FILE_06;
3633                 rv2p_fw_file = FW_RV2P_FILE_06;
3634         }
3635
3636         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3637         if (rc) {
3638                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3639                        mips_fw_file);
3640                 return rc;
3641         }
3642
3643         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3644         if (rc) {
3645                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3646                        rv2p_fw_file);
3647                 return rc;
3648         }
3649         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3650         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3651         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3652             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3653             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3654             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3655             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3656             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3657                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3658                        mips_fw_file);
3659                 return -EINVAL;
3660         }
3661         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3662             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3663             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3664                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3665                        rv2p_fw_file);
3666                 return -EINVAL;
3667         }
3668
3669         return 0;
3670 }
3671
3672 static u32
3673 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3674 {
3675         switch (idx) {
3676         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3677                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3678                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3679                 break;
3680         }
3681         return rv2p_code;
3682 }
3683
/* Download one RV2P processor's firmware.  Instructions are written two
 * 32-bit words at a time (HIGH then LOW) through the processor's
 * address/command register, after which up to 8 fixup locations are
 * re-written with patched code, and the processor is reset.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Select the command/address registers for this processor. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* Write the code image, one 8-byte instruction per iteration. */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;
                REG_WR(bp, addr, val);
        }

        /* Re-write the fixup locations with patched instruction words. */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3743
3744 static int
3745 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3746             const struct bnx2_mips_fw_file_entry *fw_entry)
3747 {
3748         u32 addr, len, file_offset;
3749         __be32 *data;
3750         u32 offset;
3751         u32 val;
3752
3753         /* Halt the CPU. */
3754         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3755         val |= cpu_reg->mode_value_halt;
3756         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3757         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3758
3759         /* Load the Text area. */
3760         addr = be32_to_cpu(fw_entry->text.addr);
3761         len = be32_to_cpu(fw_entry->text.len);
3762         file_offset = be32_to_cpu(fw_entry->text.offset);
3763         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3764
3765         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3766         if (len) {
3767                 int j;
3768
3769                 for (j = 0; j < (len / 4); j++, offset += 4)
3770                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3771         }
3772
3773         /* Load the Data area. */
3774         addr = be32_to_cpu(fw_entry->data.addr);
3775         len = be32_to_cpu(fw_entry->data.len);
3776         file_offset = be32_to_cpu(fw_entry->data.offset);
3777         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3778
3779         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3780         if (len) {
3781                 int j;
3782
3783                 for (j = 0; j < (len / 4); j++, offset += 4)
3784                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3785         }
3786
3787         /* Load the Read-Only area. */
3788         addr = be32_to_cpu(fw_entry->rodata.addr);
3789         len = be32_to_cpu(fw_entry->rodata.len);
3790         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3791         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3792
3793         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3794         if (len) {
3795                 int j;
3796
3797                 for (j = 0; j < (len / 4); j++, offset += 4)
3798                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3799         }
3800
3801         /* Clear the pre-fetch instruction. */
3802         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3803
3804         val = be32_to_cpu(fw_entry->start_addr);
3805         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3806
3807         /* Start the CPU. */
3808         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3809         val &= ~cpu_reg->mode_value_halt;
3810         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3811         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3812
3813         return 0;
3814 }
3815
3816 static int
3817 bnx2_init_cpus(struct bnx2 *bp)
3818 {
3819         const struct bnx2_mips_fw_file *mips_fw =
3820                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3821         const struct bnx2_rv2p_fw_file *rv2p_fw =
3822                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3823         int rc;
3824
3825         /* Initialize the RV2P processor. */
3826         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3827         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3828
3829         /* Initialize the RX Processor. */
3830         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3831         if (rc)
3832                 goto init_cpu_err;
3833
3834         /* Initialize the TX Processor. */
3835         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3836         if (rc)
3837                 goto init_cpu_err;
3838
3839         /* Initialize the TX Patch-up Processor. */
3840         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3841         if (rc)
3842                 goto init_cpu_err;
3843
3844         /* Initialize the Completion Processor. */
3845         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3846         if (rc)
3847                 goto init_cpu_err;
3848
3849         /* Initialize the Command Processor. */
3850         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3851
3852 init_cpu_err:
3853         return rc;
3854 }
3855
/* Transition the device between PCI power states.  D0 restores normal
 * operation; D3hot optionally arms Wake-on-LAN (magic packet reception,
 * multicast filters, EMAC/RPM enables) before cutting power.  Only D0
 * and D3hot are supported; anything else returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Clear the power-state field and the PME status bit. */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Ack any received magic/ACPI packets; disable magic
                 * packet mode while running.
                 */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Temporarily force 10/100 autoneg on copper for
                         * the WOL link, restoring the settings after.
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Commit sort mode: clear, write, then enable. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the bootcode we are suspending (with/without WOL). */
                if (!(bp->flags & BNX2_FLAG_NO_WOL))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
                                     1, 0);

                /* NOTE(review): on 5706 A0/A1 the D3hot state bits are
                 * only set when WOL is armed — presumably a chip errata
                 * workaround; confirm against the chip errata sheet.
                 */
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
3993
3994 static int
3995 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3996 {
3997         u32 val;
3998         int j;
3999
4000         /* Request access to the flash interface. */
4001         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4002         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4003                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4004                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4005                         break;
4006
4007                 udelay(5);
4008         }
4009
4010         if (j >= NVRAM_TIMEOUT_COUNT)
4011                 return -EBUSY;
4012
4013         return 0;
4014 }
4015
4016 static int
4017 bnx2_release_nvram_lock(struct bnx2 *bp)
4018 {
4019         int j;
4020         u32 val;
4021
4022         /* Relinquish nvram interface. */
4023         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4024
4025         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4026                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4027                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4028                         break;
4029
4030                 udelay(5);
4031         }
4032
4033         if (j >= NVRAM_TIMEOUT_COUNT)
4034                 return -EBUSY;
4035
4036         return 0;
4037 }
4038
4039
/* Enable writes to the flash.  The PCI-level write enable is always
 * set; flash parts flagged BNX2_NV_WREN additionally need an explicit
 * WREN command whose completion is polled for.  Returns 0 on success
 * or -EBUSY if the WREN command does not complete in time.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

        if (bp->flash_info->flags & BNX2_NV_WREN) {
                int j;

                /* Clear DONE, then issue the write-enable command. */
                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
                REG_WR(bp, BNX2_NVM_COMMAND,
                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                        udelay(5);

                        val = REG_RD(bp, BNX2_NVM_COMMAND);
                        if (val & BNX2_NVM_COMMAND_DONE)
                                break;
                }

                if (j >= NVRAM_TIMEOUT_COUNT)
                        return -EBUSY;
        }
        return 0;
}
4068
4069 static void
4070 bnx2_disable_nvram_write(struct bnx2 *bp)
4071 {
4072         u32 val;
4073
4074         val = REG_RD(bp, BNX2_MISC_CFG);
4075         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4076 }
4077
4078
4079 static void
4080 bnx2_enable_nvram_access(struct bnx2 *bp)
4081 {
4082         u32 val;
4083
4084         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4085         /* Enable both bits, even on read. */
4086         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4087                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4088 }
4089
4090 static void
4091 bnx2_disable_nvram_access(struct bnx2 *bp)
4092 {
4093         u32 val;
4094
4095         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4096         /* Disable both bits, even after read. */
4097         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4098                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4099                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4100 }
4101
/* Erase the flash page containing @offset.  Buffered flash parts need
 * no erase and return immediately.  Completion of the erase command is
 * polled for; returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4141
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored as
 * 4 big-endian bytes).  @cmd_flags carries extra NVM command bits such
 * as first/last markers.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate an offset of a buffered flash, not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
                        memcpy(ret_val, &v, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4185
4186
/* Write one 32-bit word (@val, 4 big-endian bytes) to NVRAM at @offset.
 *
 * Mirror image of bnx2_nvram_read_dword(): @cmd_flags frames the burst
 * with FIRST/LAST, and TRANSLATE-flagged parts get the linear offset
 * converted to page/byte addressing.
 *
 * Called with the NVRAM lock held, access enabled, and writes enabled
 * (bnx2_enable_nvram_write()).
 *
 * Returns 0 on success, -EBUSY on completion timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* memcpy avoids an unaligned access on the caller's byte buffer. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4230
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * 5709 chips always use the fixed flash_5709 descriptor.  Older chips
 * are probed against flash_table[]: if NVM_CFG1 bit 30 is set the
 * interface was already reconfigured (match on config1), otherwise
 * match on the strapping pins and program the part's config registers.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping pins. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops fall through with j == entry_count when nothing
	 * matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared HW config; fall back to the
	 * descriptor's total_size when the field is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4313
4314 static int
4315 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4316                 int buf_size)
4317 {
4318         int rc = 0;
4319         u32 cmd_flags, offset32, len32, extra;
4320
4321         if (buf_size == 0)
4322                 return 0;
4323
4324         /* Request access to the flash interface. */
4325         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4326                 return rc;
4327
4328         /* Enable access to flash interface */
4329         bnx2_enable_nvram_access(bp);
4330
4331         len32 = buf_size;
4332         offset32 = offset;
4333         extra = 0;
4334
4335         cmd_flags = 0;
4336
4337         if (offset32 & 3) {
4338                 u8 buf[4];
4339                 u32 pre_len;
4340
4341                 offset32 &= ~3;
4342                 pre_len = 4 - (offset & 3);
4343
4344                 if (pre_len >= len32) {
4345                         pre_len = len32;
4346                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4347                                     BNX2_NVM_COMMAND_LAST;
4348                 }
4349                 else {
4350                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4351                 }
4352
4353                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4354
4355                 if (rc)
4356                         return rc;
4357
4358                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4359
4360                 offset32 += 4;
4361                 ret_buf += pre_len;
4362                 len32 -= pre_len;
4363         }
4364         if (len32 & 3) {
4365                 extra = 4 - (len32 & 3);
4366                 len32 = (len32 + 4) & ~3;
4367         }
4368
4369         if (len32 == 4) {
4370                 u8 buf[4];
4371
4372                 if (cmd_flags)
4373                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4374                 else
4375                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4376                                     BNX2_NVM_COMMAND_LAST;
4377
4378                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4379
4380                 memcpy(ret_buf, buf, 4 - extra);
4381         }
4382         else if (len32 > 0) {
4383                 u8 buf[4];
4384
4385                 /* Read the first word. */
4386                 if (cmd_flags)
4387                         cmd_flags = 0;
4388                 else
4389                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4390
4391                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4392
4393                 /* Advance to the next dword. */
4394                 offset32 += 4;
4395                 ret_buf += 4;
4396                 len32 -= 4;
4397
4398                 while (len32 > 4 && rc == 0) {
4399                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4400
4401                         /* Advance to the next dword. */
4402                         offset32 += 4;
4403                         ret_buf += 4;
4404                         len32 -= 4;
4405                 }
4406
4407                 if (rc)
4408                         return rc;
4409
4410                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4411                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4412
4413                 memcpy(ret_buf, buf, 4 - extra);
4414         }
4415
4416         /* Disable access to flash interface */
4417         bnx2_disable_nvram_access(bp);
4418
4419         bnx2_release_nvram_lock(bp);
4420
4421         return rc;
4422 }
4423
4424 static int
4425 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4426                 int buf_size)
4427 {
4428         u32 written, offset32, len32;
4429         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4430         int rc = 0;
4431         int align_start, align_end;
4432
4433         buf = data_buf;
4434         offset32 = offset;
4435         len32 = buf_size;
4436         align_start = align_end = 0;
4437
4438         if ((align_start = (offset32 & 3))) {
4439                 offset32 &= ~3;
4440                 len32 += align_start;
4441                 if (len32 < 4)
4442                         len32 = 4;
4443                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4444                         return rc;
4445         }
4446
4447         if (len32 & 3) {
4448                 align_end = 4 - (len32 & 3);
4449                 len32 += align_end;
4450                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4451                         return rc;
4452         }
4453
4454         if (align_start || align_end) {
4455                 align_buf = kmalloc(len32, GFP_KERNEL);
4456                 if (align_buf == NULL)
4457                         return -ENOMEM;
4458                 if (align_start) {
4459                         memcpy(align_buf, start, 4);
4460                 }
4461                 if (align_end) {
4462                         memcpy(align_buf + len32 - 4, end, 4);
4463                 }
4464                 memcpy(align_buf + align_start, data_buf, buf_size);
4465                 buf = align_buf;
4466         }
4467
4468         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4469                 flash_buffer = kmalloc(264, GFP_KERNEL);
4470                 if (flash_buffer == NULL) {
4471                         rc = -ENOMEM;
4472                         goto nvram_write_end;
4473                 }
4474         }
4475
4476         written = 0;
4477         while ((written < len32) && (rc == 0)) {
4478                 u32 page_start, page_end, data_start, data_end;
4479                 u32 addr, cmd_flags;
4480                 int i;
4481
4482                 /* Find the page_start addr */
4483                 page_start = offset32 + written;
4484                 page_start -= (page_start % bp->flash_info->page_size);
4485                 /* Find the page_end addr */
4486                 page_end = page_start + bp->flash_info->page_size;
4487                 /* Find the data_start addr */
4488                 data_start = (written == 0) ? offset32 : page_start;
4489                 /* Find the data_end addr */
4490                 data_end = (page_end > offset32 + len32) ?
4491                         (offset32 + len32) : page_end;
4492
4493                 /* Request access to the flash interface. */
4494                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4495                         goto nvram_write_end;
4496
4497                 /* Enable access to flash interface */
4498                 bnx2_enable_nvram_access(bp);
4499
4500                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4501                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4502                         int j;
4503
4504                         /* Read the whole page into the buffer
4505                          * (non-buffer flash only) */
4506                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4507                                 if (j == (bp->flash_info->page_size - 4)) {
4508                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4509                                 }
4510                                 rc = bnx2_nvram_read_dword(bp,
4511                                         page_start + j,
4512                                         &flash_buffer[j],
4513                                         cmd_flags);
4514
4515                                 if (rc)
4516                                         goto nvram_write_end;
4517
4518                                 cmd_flags = 0;
4519                         }
4520                 }
4521
4522                 /* Enable writes to flash interface (unlock write-protect) */
4523                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4524                         goto nvram_write_end;
4525
4526                 /* Loop to write back the buffer data from page_start to
4527                  * data_start */
4528                 i = 0;
4529                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4530                         /* Erase the page */
4531                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4532                                 goto nvram_write_end;
4533
4534                         /* Re-enable the write again for the actual write */
4535                         bnx2_enable_nvram_write(bp);
4536
4537                         for (addr = page_start; addr < data_start;
4538                                 addr += 4, i += 4) {
4539
4540                                 rc = bnx2_nvram_write_dword(bp, addr,
4541                                         &flash_buffer[i], cmd_flags);
4542
4543                                 if (rc != 0)
4544                                         goto nvram_write_end;
4545
4546                                 cmd_flags = 0;
4547                         }
4548                 }
4549
4550                 /* Loop to write the new data from data_start to data_end */
4551                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4552                         if ((addr == page_end - 4) ||
4553                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4554                                  (addr == data_end - 4))) {
4555
4556                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4557                         }
4558                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4559                                 cmd_flags);
4560
4561                         if (rc != 0)
4562                                 goto nvram_write_end;
4563
4564                         cmd_flags = 0;
4565                         buf += 4;
4566                 }
4567
4568                 /* Loop to write back the buffer data from data_end
4569                  * to page_end */
4570                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4571                         for (addr = data_end; addr < page_end;
4572                                 addr += 4, i += 4) {
4573
4574                                 if (addr == page_end-4) {
4575                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4576                                 }
4577                                 rc = bnx2_nvram_write_dword(bp, addr,
4578                                         &flash_buffer[i], cmd_flags);
4579
4580                                 if (rc != 0)
4581                                         goto nvram_write_end;
4582
4583                                 cmd_flags = 0;
4584                         }
4585                 }
4586
4587                 /* Disable writes to flash interface (lock write-protect) */
4588                 bnx2_disable_nvram_write(bp);
4589
4590                 /* Disable access to flash interface */
4591                 bnx2_disable_nvram_access(bp);
4592                 bnx2_release_nvram_lock(bp);
4593
4594                 /* Increment written */
4595                 written += data_end - data_start;
4596         }
4597
4598 nvram_write_end:
4599         kfree(flash_buffer);
4600         kfree(align_buf);
4601         return rc;
4602 }
4603
/* Query optional firmware capabilities through the shared-memory
 * FW_CAP mailbox and acknowledge the ones the driver will use.
 *
 * Updates bp->flags (CAN_KEEP_VLAN), bp->phy_flags (REMOTE_PHY_CAP)
 * and bp->phy_port, then writes the acknowledgement signature back to
 * the DRV_ACK_CAP mailbox if the device is running.
 *
 * Called under bp->phy_lock (see bnx2_reset_chip()).
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; capabilities are re-derived below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF firmware the driver can always keep VLAN tags. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	/* A valid signature indicates capability-reporting firmware. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Let the firmware's link status pick the port type. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4643
/* Map the MSI-X vector table and pending-bit array through separate
 * GRC windows so the host can reach them after a chip reset. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4652
/* Perform a coordinated soft reset of the chip.
 *
 * Quiesces DMA, synchronizes with the bootcode firmware before and
 * after the reset (@reset_code selects the firmware reset reason),
 * issues the chip-specific reset (MISC_COMMAND on 5709, PCICFG core
 * reset on older parts), verifies endian configuration, and re-reads
 * firmware capabilities.  Includes erratum workarounds for 5706 A0/A1.
 *
 * Returns 0 on success or a negative errno (-EBUSY if the reset never
 * completes, -ENODEV on endian mismatch, or a firmware-sync error).
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via MISC_COMMAND; the read flushes the
		 * posted write before the settle delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset through the PCICFG core-reset
		 * request bit. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	/* Re-apply remote-PHY link defaults if the port type changed. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* GRC windows are reset; remap the MSI-X table if in use. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4758
/* Bring the chip to an operational state after bnx2_reset_chip().
 *
 * Configures DMA byte/word swapping, context memory, on-chip CPUs and
 * NVRAM, the MAC address and MTU, host-coalescing (HC) parameters for
 * all interrupt vectors, and the receive filter, then tells the
 * firmware initialization is complete and enables the chip blocks.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* One-PCI-burst mode tweak for 133 MHz PCI-X. */
	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 erratum: restrict TDMA to a single DMA channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox bypass window covers all contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* The RBUF thresholds are sized for at least a standard MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status block and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the HC block where the status and statistics blocks live. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters for the default vector. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer-mode coalescing bits. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status block and coalescing config for the
	 * additional MSI-X vectors. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final firmware handshake, then turn on the chip blocks. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4983
4984 static void
4985 bnx2_clear_ring_states(struct bnx2 *bp)
4986 {
4987         struct bnx2_napi *bnapi;
4988         struct bnx2_tx_ring_info *txr;
4989         struct bnx2_rx_ring_info *rxr;
4990         int i;
4991
4992         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4993                 bnapi = &bp->bnx2_napi[i];
4994                 txr = &bnapi->tx_ring;
4995                 rxr = &bnapi->rx_ring;
4996
4997                 txr->tx_cons = 0;
4998                 txr->hw_tx_cons = 0;
4999                 rxr->rx_prod_bseq = 0;
5000                 rxr->rx_prod = 0;
5001                 rxr->rx_cons = 0;
5002                 rxr->rx_pg_prod = 0;
5003                 rxr->rx_pg_cons = 0;
5004         }
5005 }
5006
/* Program the TX L2 context for connection @cid: ring type, command type,
 * and the host DMA address of the TX descriptor ring.  The 5709 uses a
 * different set of context offsets (the _XI variants) than older chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* NOTE(review): the (8 << 16) field presumably sets a BD count
	 * in bits 16+ of the command type word -- confirm against the
	 * chip documentation.
	 */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* Split the 64-bit TX BD ring DMA address into hi/lo words. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5036
/* Initialize the software state and chip context for TX ring @ring_num.
 * Ring 0 uses TX_CID; additional (TSS) rings use consecutive CIDs
 * starting at TX_TSS_CID.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	/* Wake the queue once at least half the ring is free. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The last BD in the page points back at the start of the ring,
	 * making the descriptor chain circular.
	 */
	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	/* Mailbox addresses used to publish new producer index/sequence
	 * values to the chip.
	 */
	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
5068
5069 static void
5070 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5071                      int num_rings)
5072 {
5073         int i;
5074         struct rx_bd *rxbd;
5075
5076         for (i = 0; i < num_rings; i++) {
5077                 int j;
5078
5079                 rxbd = &rx_ring[i][0];
5080                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5081                         rxbd->rx_bd_len = buf_size;
5082                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5083                 }
5084                 if (i == (num_rings - 1))
5085                         j = 0;
5086                 else
5087                         j = i + 1;
5088                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5089                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5090         }
5091 }
5092
/* Initialize software state, chip context, and buffers for RX ring
 * @ring_num.  Ring 0 uses RX_CID; additional (RSS) rings use consecutive
 * CIDs starting at RX_RSS_CID.  Fills the BD ring (and page ring, when
 * jumbo pages are in use) with buffers and publishes the initial
 * producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default to no page ring; overwritten below when enabled. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Host address of the first page-BD ring page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Host address of the first RX BD ring page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post page buffers until the ring is full or allocation fails. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Post skb buffers until the ring is full or allocation fails. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for publishing producer updates. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5172
/* Bring up all TX and RX rings and, when multiple rings are in use,
 * enable TSS for TX and program the RSS indirection table and hash
 * configuration for RX.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS: number of extra TX rings and the base TSS CID. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table one byte at a time into a
		 * 32-bit scratch word and flush every 4 entries.  Entries
		 * cycle through the values 0..num_rx_rings-2; presumably
		 * these index the non-default RSS rings -- confirm against
		 * the RX processor firmware interface.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5217
5218 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5219 {
5220         u32 max, num_rings = 1;
5221
5222         while (ring_size > MAX_RX_DESC_CNT) {
5223                 ring_size -= MAX_RX_DESC_CNT;
5224                 num_rings++;
5225         }
5226         /* round to next power of 2 */
5227         max = max_size;
5228         while ((max & num_rings) == 0)
5229                 max >>= 1;
5230
5231         if (num_rings != max)
5232                 max <<= 1;
5233
5234         return max;
5235 }
5236
5237 static void
5238 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5239 {
5240         u32 rx_size, rx_space, jumbo_size;
5241
5242         /* 8 for CRC and VLAN */
5243         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5244
5245         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5246                 sizeof(struct skb_shared_info);
5247
5248         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5249         bp->rx_pg_ring_size = 0;
5250         bp->rx_max_pg_ring = 0;
5251         bp->rx_max_pg_ring_idx = 0;
5252         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5253                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5254
5255                 jumbo_size = size * pages;
5256                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5257                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5258
5259                 bp->rx_pg_ring_size = jumbo_size;
5260                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5261                                                         MAX_RX_PG_RINGS);
5262                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5263                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5264                 bp->rx_copy_thresh = 0;
5265         }
5266
5267         bp->rx_buf_use_size = rx_size;
5268         /* hw alignment */
5269         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5270         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5271         bp->rx_ring_size = size;
5272         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5273         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5274 }
5275
5276 static void
5277 bnx2_free_tx_skbs(struct bnx2 *bp)
5278 {
5279         int i;
5280
5281         for (i = 0; i < bp->num_tx_rings; i++) {
5282                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5283                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5284                 int j;
5285
5286                 if (txr->tx_buf_ring == NULL)
5287                         continue;
5288
5289                 for (j = 0; j < TX_DESC_CNT; ) {
5290                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5291                         struct sk_buff *skb = tx_buf->skb;
5292
5293                         if (skb == NULL) {
5294                                 j++;
5295                                 continue;
5296                         }
5297
5298                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5299
5300                         tx_buf->skb = NULL;
5301
5302                         j += skb_shinfo(skb)->nr_frags + 1;
5303                         dev_kfree_skb(skb);
5304                 }
5305         }
5306 }
5307
5308 static void
5309 bnx2_free_rx_skbs(struct bnx2 *bp)
5310 {
5311         int i;
5312
5313         for (i = 0; i < bp->num_rx_rings; i++) {
5314                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5315                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5316                 int j;
5317
5318                 if (rxr->rx_buf_ring == NULL)
5319                         return;
5320
5321                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5322                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5323                         struct sk_buff *skb = rx_buf->skb;
5324
5325                         if (skb == NULL)
5326                                 continue;
5327
5328                         pci_unmap_single(bp->pdev,
5329                                          pci_unmap_addr(rx_buf, mapping),
5330                                          bp->rx_buf_use_size,
5331                                          PCI_DMA_FROMDEVICE);
5332
5333                         rx_buf->skb = NULL;
5334
5335                         dev_kfree_skb(skb);
5336                 }
5337                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5338                         bnx2_free_rx_page(bp, rxr, j);
5339         }
5340 }
5341
/* Release every TX and RX buffer still owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5348
5349 static int
5350 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5351 {
5352         int rc;
5353
5354         rc = bnx2_reset_chip(bp, reset_code);
5355         bnx2_free_skbs(bp);
5356         if (rc)
5357                 return rc;
5358
5359         if ((rc = bnx2_init_chip(bp)) != 0)
5360                 return rc;
5361
5362         bnx2_init_all_rings(bp);
5363         return 0;
5364 }
5365
/* Full reinitialization: reset the chip and rings, then bring the PHY
 * and link back up under phy_lock.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* With a remote PHY, re-sync link state reported by firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5382
5383 static int
5384 bnx2_shutdown_chip(struct bnx2 *bp)
5385 {
5386         u32 reset_code;
5387
5388         if (bp->flags & BNX2_FLAG_NO_WOL)
5389                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5390         else if (bp->wol)
5391                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5392         else
5393                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5394
5395         return bnx2_reset_chip(bp, reset_code);
5396 }
5397
/* Ethtool register self-test.  For each table entry, write 0 and then
 * all-ones to the register and verify that read/write bits (rw_mask)
 * take the written value while read-only bits (ro_mask) are unchanged.
 * Entries flagged BNX2_FL_NOT_5709 are skipped on 5709 chips.
 *
 * Returns 0 when every register passes, -ENODEV on the first failure.
 * The original register value is restored in all cases.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* offset 0xffff terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: r/w bits must read back 0, r/o bits must
		 * keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: r/w bits must read back 1, r/o bits
		 * must still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value before the next register. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore, then report the first failing register. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5568
5569 static int
5570 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5571 {
5572         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5573                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5574         int i;
5575
5576         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5577                 u32 offset;
5578
5579                 for (offset = 0; offset < size; offset += 4) {
5580
5581                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5582
5583                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5584                                 test_pattern[i]) {
5585                                 return -ENODEV;
5586                         }
5587                 }
5588         }
5589         return 0;
5590 }
5591
5592 static int
5593 bnx2_test_memory(struct bnx2 *bp)
5594 {
5595         int ret = 0;
5596         int i;
5597         static struct mem_entry {
5598                 u32   offset;
5599                 u32   len;
5600         } mem_tbl_5706[] = {
5601                 { 0x60000,  0x4000 },
5602                 { 0xa0000,  0x3000 },
5603                 { 0xe0000,  0x4000 },
5604                 { 0x120000, 0x4000 },
5605                 { 0x1a0000, 0x4000 },
5606                 { 0x160000, 0x4000 },
5607                 { 0xffffffff, 0    },
5608         },
5609         mem_tbl_5709[] = {
5610                 { 0x60000,  0x4000 },
5611                 { 0xa0000,  0x3000 },
5612                 { 0xe0000,  0x4000 },
5613                 { 0x120000, 0x4000 },
5614                 { 0x1a0000, 0x4000 },
5615                 { 0xffffffff, 0    },
5616         };
5617         struct mem_entry *mem_tbl;
5618
5619         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5620                 mem_tbl = mem_tbl_5709;
5621         else
5622                 mem_tbl = mem_tbl_5706;
5623
5624         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5625                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5626                         mem_tbl[i].len)) != 0) {
5627                         return ret;
5628                 }
5629         }
5630
5631         return ret;
5632 }
5633
5634 #define BNX2_MAC_LOOPBACK       0
5635 #define BNX2_PHY_LOOPBACK       1
5636
/* Run one loopback self-test in the given mode (BNX2_MAC_LOOPBACK or
 * BNX2_PHY_LOOPBACK): build a test frame, transmit it on ring 0, and
 * verify that it is received back intact on ring 0.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on
 * allocation or DMA-mapping failure, and -ENODEV when the frame is not
 * received correctly.  PHY loopback is skipped (returns 0) when a
 * remote PHY is in control.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* TX and RX both use vector 0 here; the reassignments below are
	 * redundant with the initializers above but kept as-is.
	 */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Frame must fit in a single RX buffer (minus 4 bytes of CRC). */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Test frame: our MAC as destination, 8 zero bytes, then an
	 * incrementing byte pattern from offset 14 onward.
	 */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_head;

	/* Force a coalesce so the status block reflects the current RX
	 * consumer index before we transmit.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single-BD frame and publish it to the chip. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the frame time to loop back, then force another coalesce
	 * so the status block shows the final TX/RX indices.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip deposits an l2_fhdr ahead of the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length (minus 4-byte CRC) must match what we sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5767
5768 #define BNX2_MAC_LOOPBACK_FAILED        1
5769 #define BNX2_PHY_LOOPBACK_FAILED        2
5770 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5771                                          BNX2_PHY_LOOPBACK_FAILED)
5772
5773 static int
5774 bnx2_test_loopback(struct bnx2 *bp)
5775 {
5776         int rc = 0;
5777
5778         if (!netif_running(bp->dev))
5779                 return BNX2_LOOPBACK_FAILED;
5780
5781         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5782         spin_lock_bh(&bp->phy_lock);
5783         bnx2_init_phy(bp, 1);
5784         spin_unlock_bh(&bp->phy_lock);
5785         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5786                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5787         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5788                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5789         return rc;
5790 }
5791
5792 #define NVRAM_SIZE 0x200
5793 #define CRC32_RESIDUAL 0xdebb20e3
5794
5795 static int
5796 bnx2_test_nvram(struct bnx2 *bp)
5797 {
5798         __be32 buf[NVRAM_SIZE / 4];
5799         u8 *data = (u8 *) buf;
5800         int rc = 0;
5801         u32 magic, csum;
5802
5803         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5804                 goto test_nvram_done;
5805
5806         magic = be32_to_cpu(buf[0]);
5807         if (magic != 0x669955aa) {
5808                 rc = -ENODEV;
5809                 goto test_nvram_done;
5810         }
5811
5812         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5813                 goto test_nvram_done;
5814
5815         csum = ether_crc_le(0x100, data);
5816         if (csum != CRC32_RESIDUAL) {
5817                 rc = -ENODEV;
5818                 goto test_nvram_done;
5819         }
5820
5821         csum = ether_crc_le(0x100, data + 0x100);
5822         if (csum != CRC32_RESIDUAL) {
5823                 rc = -ENODEV;
5824         }
5825
5826 test_nvram_done:
5827         return rc;
5828 }
5829
5830 static int
5831 bnx2_test_link(struct bnx2 *bp)
5832 {
5833         u32 bmsr;
5834
5835         if (!netif_running(bp->dev))
5836                 return -ENODEV;
5837
5838         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5839                 if (bp->link_up)
5840                         return 0;
5841                 return -ENODEV;
5842         }
5843         spin_lock_bh(&bp->phy_lock);
5844         bnx2_enable_bmsr1(bp);
5845         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5846         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5847         bnx2_disable_bmsr1(bp);
5848         spin_unlock_bh(&bp->phy_lock);
5849
5850         if (bmsr & BMSR_LSTATUS) {
5851                 return 0;
5852         }
5853         return -ENODEV;
5854 }
5855
5856 static int
5857 bnx2_test_intr(struct bnx2 *bp)
5858 {
5859         int i;
5860         u16 status_idx;
5861
5862         if (!netif_running(bp->dev))
5863                 return -ENODEV;
5864
5865         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5866
5867         /* This register is not touched during run-time. */
5868         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5869         REG_RD(bp, BNX2_HC_COMMAND);
5870
5871         for (i = 0; i < 10; i++) {
5872                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5873                         status_idx) {
5874
5875                         break;
5876                 }
5877
5878                 msleep_interruptible(10);
5879         }
5880         if (i < 10)
5881                 return 0;
5882
5883         return -ENODEV;
5884 }
5885
/* Determining link for parallel detection.
 *
 * Probes the 5706 SerDes PHY shadow/expansion registers and returns 1
 * if the link partner appears to be up without autonegotiation (i.e.
 * parallel detection should kick in), 0 otherwise.  Callers hold the
 * PHY lock (invoked from bnx2_5706_serdes_timer()).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled for this board configuration. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read its value. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detected on the wire -> no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* NOTE(review): AN_DBG is read twice — presumably the status
	 * bits are latched and the second read gives the current state;
	 * confirm against the Broadcom PHY datasheet.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Out of sync or invalid RUDI ordered sets -> no usable link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern on the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5917
/* Periodic SerDes link state machine for the 5706 (called from
 * bnx2_timer()).  Handles switching between autonegotiation and
 * parallel-detected forced 1G mode, and forces the link down when
 * sync is lost.  All PHY access is done under bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg restart is still settling; skip this tick
		 * (including the link check below).
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not produced a link, but the peer
			 * looks alive: fall back to parallel detection by
			 * forcing 1000 Mb/s full duplex.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): registers 0x17/0x15 select and read a
		 * vendor-specific PHY status; bit 0x20 apparently means
		 * the partner is now autonegotiating — confirm against
		 * the Broadcom datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner switched to autoneg; leave the forced
			 * parallel-detect mode and re-enable autoneg.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of the latched AN_DBG shadow register to
		 * get the current sync status (same idiom as in
		 * bnx2_5706_serdes_has_link()).
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link is reported up but the SerDes lost sync:
			 * force the link down once, then let the normal
			 * link logic re-evaluate on the next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5979
5980 static void
5981 bnx2_5708_serdes_timer(struct bnx2 *bp)
5982 {
5983         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5984                 return;
5985
5986         if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5987                 bp->serdes_an_pending = 0;
5988                 return;
5989         }
5990
5991         spin_lock(&bp->phy_lock);
5992         if (bp->serdes_an_pending)
5993                 bp->serdes_an_pending--;
5994         else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5995                 u32 bmcr;
5996
5997                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5998                 if (bmcr & BMCR_ANENABLE) {
5999                         bnx2_enable_forced_2g5(bp);
6000                         bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6001                 } else {
6002                         bnx2_disable_forced_2g5(bp);
6003                         bp->serdes_an_pending = 2;
6004                         bp->current_interval = BNX2_TIMER_INTERVAL;
6005                 }
6006
6007         } else
6008                 bp->current_interval = BNX2_TIMER_INTERVAL;
6009
6010         spin_unlock(&bp->phy_lock);
6011 }
6012
6013 static void
6014 bnx2_timer(unsigned long data)
6015 {
6016         struct bnx2 *bp = (struct bnx2 *) data;
6017
6018         if (!netif_running(bp->dev))
6019                 return;
6020
6021         if (atomic_read(&bp->intr_sem) != 0)
6022                 goto bnx2_restart_timer;
6023
6024         if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6025              BNX2_FLAG_USING_MSI)
6026                 bnx2_chk_missed_msi(bp);
6027
6028         bnx2_send_heart_beat(bp);
6029
6030         bp->stats_blk->stat_FwRxDrop =
6031                 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6032
6033         /* workaround occasional corrupted counters */
6034         if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6035                 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6036                                             BNX2_HC_COMMAND_STATS_NOW);
6037
6038         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6039                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6040                         bnx2_5706_serdes_timer(bp);
6041                 else
6042                         bnx2_5708_serdes_timer(bp);
6043         }
6044
6045 bnx2_restart_timer:
6046         mod_timer(&bp->timer, jiffies + bp->current_interval);
6047 }
6048
6049 static int
6050 bnx2_request_irq(struct bnx2 *bp)
6051 {
6052         unsigned long flags;
6053         struct bnx2_irq *irq;
6054         int rc = 0, i;
6055
6056         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6057                 flags = 0;
6058         else
6059                 flags = IRQF_SHARED;
6060
6061         for (i = 0; i < bp->irq_nvecs; i++) {
6062                 irq = &bp->irq_tbl[i];
6063                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6064                                  &bp->bnx2_napi[i]);
6065                 if (rc)
6066                         break;
6067                 irq->requested = 1;
6068