/* drivers/net/bnx2.c */
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
/* Driver identity strings used in log messages and ethtool output. */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "2.0.2"
#define DRV_MODULE_RELDATE      "Aug 21, 2009"
/* Firmware image paths (declared to userspace via MODULE_FIRMWARE below).
 * "06" images serve the 5706/5708 family, "09" the 5709/5716 family;
 * the "09ax" RV2P image is for A-step 5709 silicon. */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j3.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j3.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"

/* Convert a relative delay in jiffies to an absolute deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

/* Module metadata and firmware dependency declarations. */
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87
88 static int disable_msi = 0;
89
90 module_param(disable_msi, int, 0);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92
/* Board identifiers; used as the driver_data index into board_info[]
 * from the PCI device table below. Order must match board_info[]. */
typedef enum {
	BCM5706 = 0,
	NC370T,		/* HP OEM variants of the 5706 */
	NC370I,
	BCM5706S,	/* "S" suffix = SerDes (fiber) variant */
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
106
107 /* indexed by board_t, above */
/* Marketing names for each board, indexed by board_t above. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
123
/* PCI IDs handled by this driver. HP OEM entries (with specific
 * subsystem vendor/device IDs) must precede the generic PCI_ANY_ID
 * entries for the same chip so they match first. The last field is
 * the board_t index used to look up the name in board_info[]. */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S device IDs used as raw hex; presumably no
	 * PCI_DEVICE_ID_NX2_5716* constants existed yet — TODO confirm. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
149
/* NVRAM device descriptors for pre-5709 chips. Each entry carries the
 * raw hardware configuration words (strapping match value plus NVRAM
 * controller config/command words), the access flags, the page
 * geometry, the byte-address mask, the total size, and a display
 * name — field order presumably matches struct flash_spec in bnx2.h;
 * verify there before editing. The magic hex values are taken from
 * the hardware documentation and must not be changed. */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
238
/* The 5709 family has a single known NVRAM layout, so it gets a fixed
 * descriptor instead of a flash_table[] strapping lookup. */
static const struct flash_spec flash_5709 = {
	.flags          = BNX2_NV_BUFFERED,
	.page_bits      = BCM5709_FLASH_PAGE_BITS,
	.page_size      = BCM5709_FLASH_PAGE_SIZE,
	.addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
	.name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         smp_mb();
255
256         /* The ring uses 256 indices for 255 entries, one of them
257          * needs to be skipped.
258          */
259         diff = txr->tx_prod - txr->tx_cons;
260         if (unlikely(diff >= TX_DESC_CNT)) {
261                 diff &= 0xffff;
262                 if (diff == TX_DESC_CNT)
263                         diff = MAX_TX_DESC_CNT;
264         }
265         return (bp->tx_ring_size - diff);
266 }
267
/* Read a chip register through the PCI config-space indirection window.
 * The window address/data pair is shared state, hence indirect_lock. */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
279
/* Write @val to a chip register through the indirection window;
 * indirect_lock serializes the address/data register pair. */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
288
289 static void
290 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
291 {
292         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
293 }
294
295 static u32
296 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
297 {
298         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
299 }
300
/* Write @val into on-chip context memory at @cid_addr + @offset.
 * 5709-family chips use a request/ack register pair and the write must
 * be polled for completion; older chips take a direct address/data
 * write. indirect_lock serializes use of the shared context registers. */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll up to 5 times (5us apart) for the WRITE_REQ bit to
		 * clear; a timeout is silently ignored here. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
324
325 #ifdef BCM_CNIC
326 static int
327 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
328 {
329         struct bnx2 *bp = netdev_priv(dev);
330         struct drv_ctl_io *io = &info->data.io;
331
332         switch (info->cmd) {
333         case DRV_CTL_IO_WR_CMD:
334                 bnx2_reg_wr_ind(bp, io->offset, io->data);
335                 break;
336         case DRV_CTL_IO_RD_CMD:
337                 io->data = bnx2_reg_rd_ind(bp, io->offset);
338                 break;
339         case DRV_CTL_CTX_WR_CMD:
340                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
341                 break;
342         default:
343                 return -EINVAL;
344         }
345         return 0;
346 }
347
348 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
349 {
350         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
351         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
352         int sb_id;
353
354         if (bp->flags & BNX2_FLAG_USING_MSIX) {
355                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
356                 bnapi->cnic_present = 0;
357                 sb_id = bp->irq_nvecs;
358                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
359         } else {
360                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
361                 bnapi->cnic_tag = bnapi->last_status_idx;
362                 bnapi->cnic_present = 1;
363                 sb_id = 0;
364                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
365         }
366
367         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
368         cp->irq_arr[0].status_blk = (void *)
369                 ((unsigned long) bnapi->status_blk.msi +
370                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
371         cp->irq_arr[0].status_blk_num = sb_id;
372         cp->num_irq = 1;
373 }
374
375 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
376                               void *data)
377 {
378         struct bnx2 *bp = netdev_priv(dev);
379         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
380
381         if (ops == NULL)
382                 return -EINVAL;
383
384         if (cp->drv_state & CNIC_DRV_STATE_REGD)
385                 return -EBUSY;
386
387         bp->cnic_data = data;
388         rcu_assign_pointer(bp->cnic_ops, ops);
389
390         cp->num_irq = 0;
391         cp->drv_state = CNIC_DRV_STATE_REGD;
392
393         bnx2_setup_cnic_irq_info(bp);
394
395         return 0;
396 }
397
/* Unregister the CNIC module. State is cleared under cnic_lock, the
 * ops pointer is nulled via RCU, and synchronize_rcu() guarantees no
 * reader still holds the old ops pointer before we return. */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
412
413 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
414 {
415         struct bnx2 *bp = netdev_priv(dev);
416         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
417
418         cp->drv_owner = THIS_MODULE;
419         cp->chip_id = bp->chip_id;
420         cp->pdev = bp->pdev;
421         cp->io_base = bp->regview;
422         cp->drv_ctl = bnx2_drv_ctl;
423         cp->drv_register_cnic = bnx2_register_cnic;
424         cp->drv_unregister_cnic = bnx2_unregister_cnic;
425
426         return cp;
427 }
428 EXPORT_SYMBOL(bnx2_cnic_probe);
429
430 static void
431 bnx2_cnic_stop(struct bnx2 *bp)
432 {
433         struct cnic_ops *c_ops;
434         struct cnic_ctl_info info;
435
436         mutex_lock(&bp->cnic_lock);
437         c_ops = bp->cnic_ops;
438         if (c_ops) {
439                 info.cmd = CNIC_CTL_STOP_CMD;
440                 c_ops->cnic_ctl(bp->cnic_data, &info);
441         }
442         mutex_unlock(&bp->cnic_lock);
443 }
444
445 static void
446 bnx2_cnic_start(struct bnx2 *bp)
447 {
448         struct cnic_ops *c_ops;
449         struct cnic_ctl_info info;
450
451         mutex_lock(&bp->cnic_lock);
452         c_ops = bp->cnic_ops;
453         if (c_ops) {
454                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
455                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
456
457                         bnapi->cnic_tag = bnapi->last_status_idx;
458                 }
459                 info.cmd = CNIC_CTL_START_CMD;
460                 c_ops->cnic_ctl(bp->cnic_data, &info);
461         }
462         mutex_unlock(&bp->cnic_lock);
463 }
464
465 #else
466
/* No-op stubs used when CNIC support is not compiled in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
476
477 #endif
478
/* Read PHY register @reg over MDIO into *@val.
 * Returns 0 on success or -EBUSY if the MDIO transaction never
 * completes (in which case *val is set to 0).
 * If hardware auto-polling of the PHY is enabled it must be turned off
 * for the duration of the manual access and restored afterwards. */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Suspend auto-polling; the read-back flushes the write
		 * before the settling delay. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the read command: PHY address, register, READ opcode. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear, then latch the
	 * 16-bit data field. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
535
/* Write @val to PHY register @reg over MDIO.
 * Returns 0 on success or -EBUSY on MDIO timeout. Mirrors
 * bnx2_read_phy(): auto-polling is suspended around the manual access
 * and restored afterwards. */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Suspend auto-polling before driving MDIO manually. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the write command: PHY address, register, data, WRITE opcode. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the transaction to complete. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
584
585 static void
586 bnx2_disable_int(struct bnx2 *bp)
587 {
588         int i;
589         struct bnx2_napi *bnapi;
590
591         for (i = 0; i < bp->irq_nvecs; i++) {
592                 bnapi = &bp->bnx2_napi[i];
593                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
594                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
595         }
596         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
597 }
598
/* Re-enable interrupts on every vector. Each vector gets two ACK
 * writes: the first acknowledges the last seen status index while
 * still masked, the second (without MASK_INT) actually unmasks.
 * A final COAL_NOW kicks the host coalescing block so any pending
 * events generate an interrupt immediately. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
619
620 static void
621 bnx2_disable_int_sync(struct bnx2 *bp)
622 {
623         int i;
624
625         atomic_inc(&bp->intr_sem);
626         if (!netif_running(bp->dev))
627                 return;
628
629         bnx2_disable_int(bp);
630         for (i = 0; i < bp->irq_nvecs; i++)
631                 synchronize_irq(bp->irq_tbl[i].vector);
632 }
633
634 static void
635 bnx2_napi_disable(struct bnx2 *bp)
636 {
637         int i;
638
639         for (i = 0; i < bp->irq_nvecs; i++)
640                 napi_disable(&bp->bnx2_napi[i].napi);
641 }
642
643 static void
644 bnx2_napi_enable(struct bnx2 *bp)
645 {
646         int i;
647
648         for (i = 0; i < bp->irq_nvecs; i++)
649                 napi_enable(&bp->bnx2_napi[i].napi);
650 }
651
/* Quiesce the datapath: stop CNIC first, then interrupts, then NAPI
 * and the TX queues. trans_start is refreshed so the watchdog does
 * not fire a spurious TX timeout while the device is stopped. */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
663
664 static void
665 bnx2_netif_start(struct bnx2 *bp)
666 {
667         if (atomic_dec_and_test(&bp->intr_sem)) {
668                 if (netif_running(bp->dev)) {
669                         netif_tx_wake_all_queues(bp->dev);
670                         bnx2_napi_enable(bp);
671                         bnx2_enable_int(bp);
672                         bnx2_cnic_start(bp);
673                 }
674         }
675 }
676
677 static void
678 bnx2_free_tx_mem(struct bnx2 *bp)
679 {
680         int i;
681
682         for (i = 0; i < bp->num_tx_rings; i++) {
683                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
684                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
685
686                 if (txr->tx_desc_ring) {
687                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
688                                             txr->tx_desc_ring,
689                                             txr->tx_desc_mapping);
690                         txr->tx_desc_ring = NULL;
691                 }
692                 kfree(txr->tx_buf_ring);
693                 txr->tx_buf_ring = NULL;
694         }
695 }
696
697 static void
698 bnx2_free_rx_mem(struct bnx2 *bp)
699 {
700         int i;
701
702         for (i = 0; i < bp->num_rx_rings; i++) {
703                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
704                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
705                 int j;
706
707                 for (j = 0; j < bp->rx_max_ring; j++) {
708                         if (rxr->rx_desc_ring[j])
709                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
710                                                     rxr->rx_desc_ring[j],
711                                                     rxr->rx_desc_mapping[j]);
712                         rxr->rx_desc_ring[j] = NULL;
713                 }
714                 vfree(rxr->rx_buf_ring);
715                 rxr->rx_buf_ring = NULL;
716
717                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
718                         if (rxr->rx_pg_desc_ring[j])
719                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
720                                                     rxr->rx_pg_desc_ring[j],
721                                                     rxr->rx_pg_desc_mapping[j]);
722                         rxr->rx_pg_desc_ring[j] = NULL;
723                 }
724                 vfree(rxr->rx_pg_ring);
725                 rxr->rx_pg_ring = NULL;
726         }
727 }
728
729 static int
730 bnx2_alloc_tx_mem(struct bnx2 *bp)
731 {
732         int i;
733
734         for (i = 0; i < bp->num_tx_rings; i++) {
735                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
736                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
737
738                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
739                 if (txr->tx_buf_ring == NULL)
740                         return -ENOMEM;
741
742                 txr->tx_desc_ring =
743                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
744                                              &txr->tx_desc_mapping);
745                 if (txr->tx_desc_ring == NULL)
746                         return -ENOMEM;
747         }
748         return 0;
749 }
750
751 static int
752 bnx2_alloc_rx_mem(struct bnx2 *bp)
753 {
754         int i;
755
756         for (i = 0; i < bp->num_rx_rings; i++) {
757                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
758                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
759                 int j;
760
761                 rxr->rx_buf_ring =
762                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
763                 if (rxr->rx_buf_ring == NULL)
764                         return -ENOMEM;
765
766                 memset(rxr->rx_buf_ring, 0,
767                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
768
769                 for (j = 0; j < bp->rx_max_ring; j++) {
770                         rxr->rx_desc_ring[j] =
771                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
772                                                      &rxr->rx_desc_mapping[j]);
773                         if (rxr->rx_desc_ring[j] == NULL)
774                                 return -ENOMEM;
775
776                 }
777
778                 if (bp->rx_pg_ring_size) {
779                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
780                                                   bp->rx_max_pg_ring);
781                         if (rxr->rx_pg_ring == NULL)
782                                 return -ENOMEM;
783
784                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
785                                bp->rx_max_pg_ring);
786                 }
787
788                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
789                         rxr->rx_pg_desc_ring[j] =
790                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
791                                                 &rxr->rx_pg_desc_mapping[j]);
792                         if (rxr->rx_pg_desc_ring[j] == NULL)
793                                 return -ENOMEM;
794
795                 }
796         }
797         return 0;
798 }
799
/* Free everything bnx2_alloc_mem() allocated: TX/RX rings, 5709
 * context pages, and the combined status+statistics block. Note the
 * stats block shares the status block allocation (see bnx2_alloc_mem),
 * so freeing status_blk.msi releases both and stats_blk is only
 * nulled out here, never freed separately. */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
825
/* Allocate all fixed device memory: a single DMA-coherent region
 * holding the status block(s) followed by the statistics block, the
 * 5709 context pages, and the RX/TX rings. On any failure everything
 * allocated so far is released via bnx2_free_mem() and -ENOMEM is
 * returned. */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status block slot per possible MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block ... */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* ... and each further vector gets its own aligned slot
		 * within the same allocation. */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives immediately after the status blocks. */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host-resident context memory. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
902
/*
 * Report the current link state (speed, duplex, autoneg progress) to
 * the bootcode through the BNX2_LINK_STATUS shared-memory word.
 * Skipped entirely when the firmware manages the PHY itself.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	/* Firmware owns the PHY; it already knows the link state. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed and duplex into the firmware's format. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice: link-status bits are latched in
			 * MII PHYs, so the second read reflects the current
			 * state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
961
962 static char *
963 bnx2_xceiver_str(struct bnx2 *bp)
964 {
965         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
966                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
967                  "Copper"));
968 }
969
/*
 * Log the link state to the kernel log and toggle the netif carrier
 * accordingly, then forward the state to the bootcode via
 * bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		/* The message is assembled across several printk calls:
		 * speed, duplex, then the negotiated pause configuration.
		 */
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1006
/*
 * Resolve the effective flow-control setting (bp->flow_ctrl) after a
 * link change.  If pause autonegotiation is not fully enabled, the
 * user-requested setting is applied directly (full duplex only);
 * otherwise the local and link-partner advertisements are combined
 * per IEEE 802.3 Annex 28B.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Pause not autonegotiated: honor the forced request,
		 * but only on a full-duplex link.
		 */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* The 5708 SerDes PHY reports the resolved pause result
		 * directly in its 1000X status register.
		 */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Translate 1000BASE-X pause bits into the copper-style
		 * PAUSE_CAP/PAUSE_ASYM bits so one resolution table below
		 * handles both PHY types.
		 */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1082
/*
 * Record link-up parameters for the 5709 SerDes PHY.  Speed and duplex
 * are decoded from the GP_STATUS top-AN status register; when speed
 * autoneg is disabled the requested values are used instead.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Switch to the GP_STATUS block, read the AN status, then
	 * restore the default COMBO_IEEEB0 block address.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		/* Forced speed: trust the user's request. */
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1121
/*
 * Record link-up parameters for the 5708 SerDes PHY.  Speed and duplex
 * are decoded from the BCM5708S 1000X status register.
 * Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1150
/*
 * Record link-up parameters for the 5706 SerDes PHY.  The link is
 * always 1000 Mbps; duplex comes from BMCR when forced, or from the
 * common local/partner 1000X advertisement when autonegotiated.
 * Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex is final. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	/* Autoneg: refine duplex from the advertisement overlap. */
	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1187
/*
 * Record link-up parameters for a copper PHY.  With autoneg enabled,
 * the highest common speed/duplex is derived from the 1000BASE-T
 * registers first, then from the 10/100 advertisement registers; with
 * autoneg disabled, BMCR's forced speed/duplex bits are used.
 * Always returns 0 (but clears bp->link_up if no common mode exists).
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000's partner-ability bits sit two positions above
		 * CTRL1000's advertisement bits (per mii.h), so shift to
		 * align them before intersecting.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to 10/100 resolution. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common with the partner. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1253
/*
 * Program the L2 context type register for one RX ring context.  On
 * the 5709 this also sets the pause-frame watermarks: the low water
 * mark (when to send pause) is disabled unless TX flow control is on,
 * and the high water mark is a quarter of the ring, both scaled into
 * the register's units and clamped to its field width.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): the meaning of this 0x02 field is not visible
	 * here -- confirm against the 5709 context layout docs.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* Watermarks that don't fit the ring disable pausing. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* Clamp to the 4-bit high-water field; a zero high water
		 * mark forces the low mark off as well.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1289
1290 static void
1291 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1292 {
1293         int i;
1294         u32 cid;
1295
1296         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1297                 if (i == 1)
1298                         cid = RX_RSS_CID;
1299                 bnx2_init_rx_context(bp, cid);
1300         }
1301 }
1302
/*
 * Program the EMAC to match the resolved link state: inter-frame gap,
 * port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.  Also
 * acknowledges the link-change interrupt and, on 5709, reprograms the
 * RX contexts (whose watermarks depend on the flow-control result).
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* NOTE(review): 1000HD appears to need a different TX_LENGTHS
	 * value -- confirm the 0x26ff constant against chip docs.
	 */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* RX context watermarks depend on flow_ctrl; refresh them. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1370
1371 static void
1372 bnx2_enable_bmsr1(struct bnx2 *bp)
1373 {
1374         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1375             (CHIP_NUM(bp) == CHIP_NUM_5709))
1376                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1377                                MII_BNX2_BLK_ADDR_GP_STATUS);
1378 }
1379
1380 static void
1381 bnx2_disable_bmsr1(struct bnx2 *bp)
1382 {
1383         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1384             (CHIP_NUM(bp) == CHIP_NUM_5709))
1385                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1386                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1387 }
1388
/*
 * Ensure 2.5G advertisement is enabled in the PHY's UP1 register
 * (switching to the OVER1G block on 5709).  Also adds 2500baseX to
 * bp->advertising when speed autoneg is on.
 *
 * Returns 1 if 2.5G was already enabled, 0 if it had to be turned on
 * or if the PHY is not 2.5G-capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* UP1 lives in the OVER1G block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default block address on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1417
/*
 * Ensure 2.5G advertisement is disabled in the PHY's UP1 register
 * (switching to the OVER1G block on 5709).
 *
 * Returns 1 if 2.5G was enabled and had to be turned off, 0 if it was
 * already off or the PHY is not 2.5G-capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* UP1 lives in the OVER1G block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default block address on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1443
1444 static void
1445 bnx2_enable_forced_2g5(struct bnx2 *bp)
1446 {
1447         u32 bmcr;
1448
1449         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1450                 return;
1451
1452         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1453                 u32 val;
1454
1455                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1456                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1457                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1458                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1459                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1460                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1461
1462                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1463                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1464                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1465
1466         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1467                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1468                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1469         }
1470
1471         if (bp->autoneg & AUTONEG_SPEED) {
1472                 bmcr &= ~BMCR_ANENABLE;
1473                 if (bp->req_duplex == DUPLEX_FULL)
1474                         bmcr |= BMCR_FULLDPLX;
1475         }
1476         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1477 }
1478
1479 static void
1480 bnx2_disable_forced_2g5(struct bnx2 *bp)
1481 {
1482         u32 bmcr;
1483
1484         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1485                 return;
1486
1487         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1488                 u32 val;
1489
1490                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1491                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1492                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1493                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1494                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1495
1496                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1497                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1498                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1499
1500         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1501                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1502                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1503         }
1504
1505         if (bp->autoneg & AUTONEG_SPEED)
1506                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1507         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1508 }
1509
1510 static void
1511 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1512 {
1513         u32 val;
1514
1515         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1516         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1517         if (start)
1518                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1519         else
1520                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1521 }
1522
/*
 * Re-evaluate the link state after a link-change event: read the PHY
 * status, update bp->link_up and the speed/duplex/flow-control fields
 * via the chip-specific linkup helpers, report any state change, and
 * reprogram the MAC.  Always returns 0.
 *
 * Caller holds bp->phy_lock (all PHY accesses here assume it).
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware manages the PHY; link events come via shmem instead. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice (latched bits) through the bmsr1 window. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes workaround: derive link state from the EMAC
		 * status and the AN debug shadow register instead of
		 * trusting BMSR alone.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting and re-enable
		 * autoneg if we had fallen back to parallel detect.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/report when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1606
/*
 * Reset the PHY via BMCR and poll until the self-clearing reset bit
 * drops (up to ~1 ms in 10 us steps, plus a 20 us settle delay).
 *
 * Returns 0 on success or -EBUSY if the PHY never cleared the bit.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Give the PHY a moment after reset completes. */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
1630
1631 static u32
1632 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1633 {
1634         u32 adv = 0;
1635
1636         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1637                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1638
1639                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1640                         adv = ADVERTISE_1000XPAUSE;
1641                 }
1642                 else {
1643                         adv = ADVERTISE_PAUSE_CAP;
1644                 }
1645         }
1646         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1647                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1648                         adv = ADVERTISE_1000XPSE_ASYM;
1649                 }
1650                 else {
1651                         adv = ADVERTISE_PAUSE_ASYM;
1652                 }
1653         }
1654         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1655                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1656                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1657                 }
1658                 else {
1659                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1660                 }
1661         }
1662         return adv;
1663 }
1664
1665 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1666
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	/*
	 * Build a SET_LINK argument word (speed/duplex, pause, and
	 * wirespeed options) and hand it to the firmware, which manages
	 * the remote PHY.  phy_lock is dropped around the firmware
	 * handshake, as the sparse annotations above document.
	 * Always returns 0.
	 */
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: translate each advertised mode into the
		 * firmware's speed flags.
		 */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced speed: pick exactly one speed flag. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Map MII pause advertisement bits to the firmware's flags. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep/poll; release phy_lock across it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1725
/* Configure the SerDes PHY according to bp->autoneg, bp->req_line_speed
 * and bp->req_duplex.
 *
 * Called with bp->phy_lock held; the lock is dropped around the msleep()
 * below, hence the __releases/__acquires sparse annotations.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Remote-managed PHYs are configured via the firmware mailbox. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex: program BMCR directly, autoneg off. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Changing the 2.5G capability may require bouncing the
		 * link so the partner sees the new configuration. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Forcing 2.5G mode is chip-specific. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;	/* clear forced-2.5G bit */
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly restart autoneg with nothing
				 * advertised so the partner drops the link,
				 * then apply the forced settings. */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* No register change needed; just re-resolve flow
			 * control and reprogram the MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Only restart autoneg if the advertisement changed or autoneg
	 * is currently disabled. */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1842
/* Ethtool advertisement mask for fibre: include 2.5G only when the PHY
 * is 2.5G capable.  NOTE: expands using a local "bp" variable. */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask covering every copper speed/duplex mode. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
	ADVERTISED_1000baseT_Full)

/* MII advertisement register bits for all 10/100 modes (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for all gigabit modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1857
1858 static void
1859 bnx2_set_default_remote_link(struct bnx2 *bp)
1860 {
1861         u32 link;
1862
1863         if (bp->phy_port == PORT_TP)
1864                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1865         else
1866                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1867
1868         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1869                 bp->req_line_speed = 0;
1870                 bp->autoneg |= AUTONEG_SPEED;
1871                 bp->advertising = ADVERTISED_Autoneg;
1872                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1873                         bp->advertising |= ADVERTISED_10baseT_Half;
1874                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1875                         bp->advertising |= ADVERTISED_10baseT_Full;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1877                         bp->advertising |= ADVERTISED_100baseT_Half;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1879                         bp->advertising |= ADVERTISED_100baseT_Full;
1880                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1881                         bp->advertising |= ADVERTISED_1000baseT_Full;
1882                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1883                         bp->advertising |= ADVERTISED_2500baseX_Full;
1884         } else {
1885                 bp->autoneg = 0;
1886                 bp->advertising = 0;
1887                 bp->req_duplex = DUPLEX_FULL;
1888                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1889                         bp->req_line_speed = SPEED_10;
1890                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1891                                 bp->req_duplex = DUPLEX_HALF;
1892                 }
1893                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1894                         bp->req_line_speed = SPEED_100;
1895                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1896                                 bp->req_duplex = DUPLEX_HALF;
1897                 }
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1899                         bp->req_line_speed = SPEED_1000;
1900                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1901                         bp->req_line_speed = SPEED_2500;
1902         }
1903 }
1904
1905 static void
1906 bnx2_set_default_link(struct bnx2 *bp)
1907 {
1908         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1909                 bnx2_set_default_remote_link(bp);
1910                 return;
1911         }
1912
1913         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1914         bp->req_line_speed = 0;
1915         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1916                 u32 reg;
1917
1918                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1919
1920                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1921                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1922                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1923                         bp->autoneg = 0;
1924                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1925                         bp->req_duplex = DUPLEX_FULL;
1926                 }
1927         } else
1928                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1929 }
1930
/* Advance the driver heartbeat sequence and write it to the shared-memory
 * pulse mailbox so the firmware knows the driver is alive.  Writes go
 * through the PCI register window directly, serialized by indirect_lock
 * (the address write and data write must not interleave with other
 * window users).
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1944
/* Handle a link event from the remote (firmware-managed) PHY.  Decodes
 * the BNX2_LINK_STATUS shared-memory word into bp->link_up, line_speed,
 * duplex and flow_ctrl, reports any link state change, and reprograms
 * the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* old state, for change reporting */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Answer the firmware heartbeat if it has expired. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case sets half duplex and then deliberately
		 * falls through to the matching *FULL case to set the
		 * line speed. */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: if speed or flow-control autoneg is off,
		 * use the requested setting (full duplex only); otherwise
		 * take the negotiated result from the status word. */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Re-derive defaults if the media type changed. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2021
2022 static int
2023 bnx2_set_remote_link(struct bnx2 *bp)
2024 {
2025         u32 evt_code;
2026
2027         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2028         switch (evt_code) {
2029                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2030                         bnx2_remote_phy_event(bp);
2031                         break;
2032                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2033                 default:
2034                         bnx2_send_heart_beat(bp);
2035                         break;
2036         }
2037         return 0;
2038 }
2039
/* Configure the copper PHY for autoneg or forced speed/duplex.
 * Called with bp->phy_lock held; the lock is dropped around the msleep()
 * below, hence the __releases/__acquires annotations.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask the current advertisement down to the speed/pause
		 * fields we manage so the comparison below ignores the
		 * other register bits. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the new advertisement from the requested modes. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if something actually changed or
		 * autoneg is currently disabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path (10/100 only on copper here). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2138
2139 static int
2140 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2141 __releases(&bp->phy_lock)
2142 __acquires(&bp->phy_lock)
2143 {
2144         if (bp->loopback == MAC_LOOPBACK)
2145                 return 0;
2146
2147         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2148                 return (bnx2_setup_serdes_phy(bp, port));
2149         }
2150         else {
2151                 return (bnx2_setup_copper_phy(bp));
2152         }
2153 }
2154
/* Chip-specific init for the 5709 SerDes PHY.  Selects the AN MMD,
 * optionally resets the PHY, then programs fibre mode, 2.5G capability,
 * next-page and clause-73 BAM settings via the block-address scheme.
 * The register block selected by MII_BNX2_BLK_ADDR before each group of
 * accesses determines which registers the reads/writes hit, so the
 * ordering here must be preserved.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* On this PHY the standard IEEE MII registers sit at a 0x10
	 * offset. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route subsequent MDIO accesses to the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Fixed fibre mode, no media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the PHY supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2204
/* Chip-specific init for the 5708 SerDes PHY: fibre mode with
 * auto-detect, PLL early-lock detect, optional 2.5G advertisement, plus
 * TX amplitude fixups for early chip revisions and backplane boards.
 * Register accesses are routed through BCM5708S_BLK_ADDR, so ordering
 * matters.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM config on
	 * backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2262
/* Chip-specific init for the 5706 SerDes PHY.  Configures extended
 * packet length according to MTU through vendor-specific shadow
 * registers (0x18/0x1c); the magic values are taken as-is from the
 * original vendor code.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2300
/* Chip-specific init for the integrated copper PHY: applies the CRC and
 * early-DAC workarounds when flagged, sets extended packet length for
 * jumbo MTU, and enables ethernet@wirespeed.  The 0x15/0x17/0x18 writes
 * are vendor-specific shadow/expansion register sequences; the values
 * are taken as-is from the original vendor code.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC workaround sequence (see BNX2_PHY_FLAG_CRC_FIX). */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via DSP expansion register bit 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2352
2353
/* Initialize the PHY: set default MII register offsets, enable link
 * attention, read the PHY id and run the chip-specific init routine,
 * then program the current link settings.  Remote-managed PHYs skip the
 * local init entirely.  Called with bp->phy_lock held; bnx2_setup_phy()
 * may drop and reacquire it.  Returns 0 or a negative error from the
 * chip init / setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default IEEE MII offsets; chip init below may override them
	 * (e.g. the 5709 SerDes uses a 0x10 offset). */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY id from the two id registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2399
2400 static int
2401 bnx2_set_mac_loopback(struct bnx2 *bp)
2402 {
2403         u32 mac_mode;
2404
2405         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2406         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2407         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2408         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2409         bp->link_up = 1;
2410         return 0;
2411 }
2412
2413 static int bnx2_test_link(struct bnx2 *);
2414
/* Put the PHY into loopback at 1000 Mbps full duplex, wait for the
 * (internal) link to come up, then configure the EMAC for GMII with the
 * MAC-level loopback/force bits cleared.  Returns 0 on success or the
 * error from the PHY write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll up to ~1 second for the loopback link. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2444
/* Send a command to the bootcode firmware through the driver mailbox
 * and, when @ack is set, wait up to BNX2_FW_ACK_TIME_OUT_MS for the
 * firmware to echo the sequence number back.  @silent suppresses the
 * timeout message.
 *
 * Returns 0 on success (or for WAIT0-class commands regardless of ack),
 * -EBUSY on ack timeout (after notifying the firmware of the timeout),
 * or -EIO if the firmware reported a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the command with the next sequence number so the ack can
	 * be matched below. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0-class commands succeed whether or not an ack arrived. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2490
/* Initialize 5709 context memory: trigger the hardware MEM_INIT and
 * poll for completion, then zero each host context page and program its
 * DMA address into the chip's host page table, polling each write for
 * completion.
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete in
 * time, or -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode host page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the hardware to clear MEM_INIT. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the 64-bit DMA address of this page into the
		 * host page table, then request the write. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for the hardware to consume the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2538
/* Zero out the on-chip context memory for all 96 connection IDs
 * (used on chips whose context lives on-chip; the 5709 host-memory
 * variant is handled by bnx2_init_5709_context() instead).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* On 5706 A0, virtual CIDs with bit 3 set map
			 * to a different physical CID range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans CTX_SIZE bytes split into
		 * PHY_CTX_SIZE physical pages; zero them all.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2581
2582 static int
2583 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2584 {
2585         u16 *good_mbuf;
2586         u32 good_mbuf_cnt;
2587         u32 val;
2588
2589         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2590         if (good_mbuf == NULL) {
2591                 printk(KERN_ERR PFX "Failed to allocate memory in "
2592                                     "bnx2_alloc_bad_rbuf\n");
2593                 return -ENOMEM;
2594         }
2595
2596         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2597                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2598
2599         good_mbuf_cnt = 0;
2600
2601         /* Allocate a bunch of mbufs and save the good ones in an array. */
2602         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2603         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2604                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2605                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2606
2607                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2608
2609                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2610
2611                 /* The addresses with Bit 9 set are bad memory blocks. */
2612                 if (!(val & (1 << 9))) {
2613                         good_mbuf[good_mbuf_cnt] = (u16) val;
2614                         good_mbuf_cnt++;
2615                 }
2616
2617                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2618         }
2619
2620         /* Free the good ones back to the mbuf pool thus discarding
2621          * all the bad ones. */
2622         while (good_mbuf_cnt) {
2623                 good_mbuf_cnt--;
2624
2625                 val = good_mbuf[good_mbuf_cnt];
2626                 val = (val << 9) | val | 1;
2627
2628                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2629         }
2630         kfree(good_mbuf);
2631         return 0;
2632 }
2633
2634 static void
2635 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2636 {
2637         u32 val;
2638
2639         val = (mac_addr[0] << 8) | mac_addr[1];
2640
2641         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2642
2643         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2644                 (mac_addr[4] << 8) | mac_addr[5];
2645
2646         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2647 }
2648
/* Allocate a fresh page for slot @index of the rx page ring, DMA-map
 * it, and publish its address in the matching rx descriptor.
 *
 * Returns 0 on success, -ENOMEM if no page is available, -EIO if the
 * DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* The descriptor takes the 64-bit DMA address as two halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2673
2674 static void
2675 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2676 {
2677         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2678         struct page *page = rx_pg->page;
2679
2680         if (!page)
2681                 return;
2682
2683         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2684                        PCI_DMA_FROMDEVICE);
2685
2686         __free_page(page);
2687         rx_pg->page = NULL;
2688 }
2689
/* Allocate a DMA-mapped skb for rx ring slot @index, publish its
 * address in the matching descriptor, and advance rx_prod_bseq by the
 * buffer size.
 *
 * Returns 0 on success, -ENOMEM if the skb cannot be allocated, -EIO
 * if the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* The descriptor takes the 64-bit DMA address as two halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2724
2725 static int
2726 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2727 {
2728         struct status_block *sblk = bnapi->status_blk.msi;
2729         u32 new_link_state, old_link_state;
2730         int is_set = 1;
2731
2732         new_link_state = sblk->status_attn_bits & event;
2733         old_link_state = sblk->status_attn_bits_ack & event;
2734         if (new_link_state != old_link_state) {
2735                 if (new_link_state)
2736                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2737                 else
2738                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2739         } else
2740                 is_set = 0;
2741
2742         return is_set;
2743 }
2744
/* Service PHY attention events — link-state changes and timer-abort
 * (remote link) events — while holding phy_lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2758
2759 static inline u16
2760 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2761 {
2762         u16 cons;
2763
2764         /* Tell compiler that status block fields can change. */
2765         barrier();
2766         cons = *bnapi->hw_tx_cons_ptr;
2767         barrier();
2768         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2769                 cons++;
2770         return cons;
2771 }
2772
/* Reclaim completed TX descriptors for this napi instance's ring:
 * walk from the driver's consumer index to the hardware's, unmapping
 * and freeing each transmitted skb, then wake the tx queue if it was
 * stopped and enough descriptors are free again.
 *
 * Returns the number of packets reclaimed (at most @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One tx queue per napi instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's last BD, accounting
			 * for the skipped last entry of a ring page.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed ring-distance test: stop if the final
			 * BD of this packet has not completed yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Advance past the frag BDs, then the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read hw_cons in case more completions arrived. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent bnx2_start_xmit() stopping the queue.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2855
/* Return @count rx pages to the page ring without allocating new
 * ones, moving page pointers, DMA mappings and descriptor addresses
 * from consumer slots to producer slots.  If @skb is non-NULL, its
 * last frag page could not be replaced by the caller: that page is
 * recycled into the ring and the skb freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag's page from the skb... */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		/* ...and park it back in the current consumer slot. */
		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Only move state when the slots differ. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2911
/* Recycle an rx buffer: hand the skb and its DMA mapping from the
 * consumer slot @cons back to the producer slot @prod so hardware can
 * reuse it, without allocating anything.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area, synced for CPU earlier, back to the
	 * device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: skb pointer update above is all that's needed. */
	if (cons == prod)
		return;

	/* Transfer the DMA mapping and the descriptor address. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2941
/* Finish receiving a packet held in @skb's linear buffer and, when
 * @hdr_len is non-zero, in rx ring pages: refill the ring slot, unmap
 * the old buffer, and attach the page frags (trimming the 4 trailing
 * CRC bytes).
 *
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits; @len excludes the 4-byte CRC.
 *
 * Returns 0 on success, or a negative errno if replacement buffers
 * could not be allocated (the packet's buffers are then recycled).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the producer slot first; on failure recycle the old
	 * buffer (and its pages) instead of delivering the packet.
	 */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Add back the CRC to size the page count. */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Bytes (including CRC) that live in the page ring. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only CRC bytes remain in the pages:
				 * trim them from the skb and return
				 * the unused pages to the ring.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* drop the trailing CRC */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3040
3041 static inline u16
3042 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3043 {
3044         u16 cons;
3045
3046         /* Tell compiler that status block fields can change. */
3047         barrier();
3048         cons = *bnapi->hw_rx_cons_ptr;
3049         barrier();
3050         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3051                 cons++;
3052         return cons;
3053 }
3054
/* NAPI receive handler: process up to @budget completed rx packets.
 *
 * For each completion: sync the header area for the CPU, inspect the
 * chip-written l2_fhdr, then either copy small packets into a fresh
 * skb (recycling the ring buffer) or take over the buffer and attach
 * any split/jumbo page frags.  VLAN tag, checksum offload result and
 * rx queue are recorded before handing the skb to the stack.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame-header area for inspection. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr to the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/data split: the ip_xsum field is
			 * reused for the header length in this mode.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling buffers and pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;	/* strip the trailing 4-byte CRC */

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and
			 * recycle the original ring buffer.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group registered: reinsert
				 * the stripped 802.1Q tag into the
				 * frame itself.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they carry a VLAN tag. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report the hardware checksum result when enabled. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the hardware. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3230
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack and mask the interrupt via the int-ack command register. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3253
/* One-shot MSI ISR.  Unlike bnx2_msi(), no explicit mask write is
 * issued here (NOTE(review): presumably the host coalescing block
 * disarms itself in one-shot mode — confirm against chip docs).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3270
/* INTx (possibly shared line) ISR: detect spurious invocations via
 * the status index, ack/mask the interrupt, and schedule NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the index being serviced so the spurious-interrupt
	 * test above works for the next IRQ.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3309
3310 static inline int
3311 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3312 {
3313         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3314         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3315
3316         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3317             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3318                 return 1;
3319         return 0;
3320 }
3321
3322 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3323                                  STATUS_ATTN_BITS_TIMER_ABORT)
3324
3325 static inline int
3326 bnx2_has_work(struct bnx2_napi *bnapi)
3327 {
3328         struct status_block *sblk = bnapi->status_blk.msi;
3329
3330         if (bnx2_has_fast_work(bnapi))
3331                 return 1;
3332
3333 #ifdef BCM_CNIC
3334         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3335                 return 1;
3336 #endif
3337
3338         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3339             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3340                 return 1;
3341
3342         return 0;
3343 }
3344
/* Workaround for a missed MSI: if work is pending but the status index
 * has not advanced since the previous idle check, assume the MSI was
 * lost.  Toggle the MSI enable bit to reset the chip's MSI state and
 * invoke the handler directly.  Presumably called periodically from the
 * driver's timer — the caller is not visible here.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                /* Nothing to do if MSI is not in use. */
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        /* Disable and re-enable MSI, then run the handler
                         * by hand to process the stalled events.
                         */
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        /* Remember where we were for the next check. */
        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3366
#ifdef BCM_CNIC
/* Hand status-block events to the registered CNIC (offload) driver, if
 * any.  bp->cnic_ops is published via RCU, so the handler pointer is
 * only dereferenced inside an RCU read-side critical section.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                /* cnic_tag is compared against status_idx in
                 * bnx2_has_work() to detect new CNIC events.
                 */
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
#endif
3383
/* Service pending attention (link-state / timer-abort) events.  An event
 * is pending while the attention bits and their acks disagree.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                /* Read back to flush the coalesce-now command. */
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
3403
3404 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3405                           int work_done, int budget)
3406 {
3407         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3408         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3409
3410         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3411                 bnx2_tx_int(bp, bnapi, 0);
3412
3413         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3414                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3415
3416         return work_done;
3417 }
3418
/* NAPI poll routine for the extra MSI-X vectors: RX/TX work only, no
 * link or CNIC handling (those belong to the default vector's poll).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        napi_complete(napi);
                        /* Ack up to last_status_idx and unmask this
                         * vector's interrupt.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3445
/* NAPI poll routine for the default vector: handles attention events,
 * RX/TX work and CNIC events, then re-enables interrupts when idle.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        /* MSI/MSI-X: one write acks the index and
                         * unmasks the interrupt.
                         */
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: first write acks with the interrupt still
                         * masked, second write unmasks it — presumably to
                         * avoid a spurious level-triggered interrupt in
                         * between.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3494
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with the bits we may set below
         * cleared, so a mode change is detected at the end.
         */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in RX only while no VLAN group is registered. */
        if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: fill every hash register. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Hash the address: low 8 CRC bits select one of
                         * 256 filter bits (8 registers x 32 bits).
                         */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Too many secondary unicast addresses to match in hardware:
         * fall back to promiscuous.
         */
        if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into to the match filter list */
                i = 0;
                list_for_each_entry(ha, &dev->uc.list, list) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }

        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Program the sort-user register: clear, load, then enable. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3590
3591 static int __devinit
3592 check_fw_section(const struct firmware *fw,
3593                  const struct bnx2_fw_file_section *section,
3594                  u32 alignment, bool non_empty)
3595 {
3596         u32 offset = be32_to_cpu(section->offset);
3597         u32 len = be32_to_cpu(section->len);
3598
3599         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3600                 return -EINVAL;
3601         if ((non_empty && len == 0) || len > fw->size - offset ||
3602             len & (alignment - 1))
3603                 return -EINVAL;
3604         return 0;
3605 }
3606
3607 static int __devinit
3608 check_mips_fw_entry(const struct firmware *fw,
3609                     const struct bnx2_mips_fw_file_entry *entry)
3610 {
3611         if (check_fw_section(fw, &entry->text, 4, true) ||
3612             check_fw_section(fw, &entry->data, 4, false) ||
3613             check_fw_section(fw, &entry->rodata, 4, false))
3614                 return -EINVAL;
3615         return 0;
3616 }
3617
3618 static int __devinit
3619 bnx2_request_firmware(struct bnx2 *bp)
3620 {
3621         const char *mips_fw_file, *rv2p_fw_file;
3622         const struct bnx2_mips_fw_file *mips_fw;
3623         const struct bnx2_rv2p_fw_file *rv2p_fw;
3624         int rc;
3625
3626         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3627                 mips_fw_file = FW_MIPS_FILE_09;
3628                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3629                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3630                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3631                 else
3632                         rv2p_fw_file = FW_RV2P_FILE_09;
3633         } else {
3634                 mips_fw_file = FW_MIPS_FILE_06;
3635                 rv2p_fw_file = FW_RV2P_FILE_06;
3636         }
3637
3638         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3639         if (rc) {
3640                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3641                        mips_fw_file);
3642                 return rc;
3643         }
3644
3645         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3646         if (rc) {
3647                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3648                        rv2p_fw_file);
3649                 return rc;
3650         }
3651         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3652         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3653         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3654             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3655             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3656             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3657             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3658             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3659                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3660                        mips_fw_file);
3661                 return -EINVAL;
3662         }
3663         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3664             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3665             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3666                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3667                        rv2p_fw_file);
3668                 return -EINVAL;
3669         }
3670
3671         return 0;
3672 }
3673
3674 static u32
3675 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3676 {
3677         switch (idx) {
3678         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3679                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3680                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3681                 break;
3682         }
3683         return rv2p_code;
3684 }
3685
/* Download one RV2P processor's code image, apply its fixups, and leave
 * the processor in reset (it is un-stalled later).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Each processor has its own write command and address register. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* Instructions are 64 bits wide: write high and low halves, then
         * commit at instruction slot i/8.
         */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;
                REG_WR(bp, addr, val);
        }

        /* Re-walk the image and rewrite the fixup locations.  Each fixup
         * entry is a word index into the code; loc==0 means unused.
         */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3745
3746 static int
3747 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3748             const struct bnx2_mips_fw_file_entry *fw_entry)
3749 {
3750         u32 addr, len, file_offset;
3751         __be32 *data;
3752         u32 offset;
3753         u32 val;
3754
3755         /* Halt the CPU. */
3756         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3757         val |= cpu_reg->mode_value_halt;
3758         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3759         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3760
3761         /* Load the Text area. */
3762         addr = be32_to_cpu(fw_entry->text.addr);
3763         len = be32_to_cpu(fw_entry->text.len);
3764         file_offset = be32_to_cpu(fw_entry->text.offset);
3765         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3766
3767         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3768         if (len) {
3769                 int j;
3770
3771                 for (j = 0; j < (len / 4); j++, offset += 4)
3772                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3773         }
3774
3775         /* Load the Data area. */
3776         addr = be32_to_cpu(fw_entry->data.addr);
3777         len = be32_to_cpu(fw_entry->data.len);
3778         file_offset = be32_to_cpu(fw_entry->data.offset);
3779         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3780
3781         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3782         if (len) {
3783                 int j;
3784
3785                 for (j = 0; j < (len / 4); j++, offset += 4)
3786                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3787         }
3788
3789         /* Load the Read-Only area. */
3790         addr = be32_to_cpu(fw_entry->rodata.addr);
3791         len = be32_to_cpu(fw_entry->rodata.len);
3792         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3793         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3794
3795         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3796         if (len) {
3797                 int j;
3798
3799                 for (j = 0; j < (len / 4); j++, offset += 4)
3800                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3801         }
3802
3803         /* Clear the pre-fetch instruction. */
3804         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3805
3806         val = be32_to_cpu(fw_entry->start_addr);
3807         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3808
3809         /* Start the CPU. */
3810         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3811         val &= ~cpu_reg->mode_value_halt;
3812         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3814
3815         return 0;
3816 }
3817
3818 static int
3819 bnx2_init_cpus(struct bnx2 *bp)
3820 {
3821         const struct bnx2_mips_fw_file *mips_fw =
3822                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3823         const struct bnx2_rv2p_fw_file *rv2p_fw =
3824                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3825         int rc;
3826
3827         /* Initialize the RV2P processor. */
3828         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3829         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3830
3831         /* Initialize the RX Processor. */
3832         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3833         if (rc)
3834                 goto init_cpu_err;
3835
3836         /* Initialize the TX Processor. */
3837         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3838         if (rc)
3839                 goto init_cpu_err;
3840
3841         /* Initialize the TX Patch-up Processor. */
3842         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3843         if (rc)
3844                 goto init_cpu_err;
3845
3846         /* Initialize the Completion Processor. */
3847         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3848         if (rc)
3849                 goto init_cpu_err;
3850
3851         /* Initialize the Command Processor. */
3852         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3853
3854 init_cpu_err:
3855         return rc;
3856 }
3857
/* Move the device between PCI power states.
 * PCI_D0: wake the chip (delaying if it was in D3hot) and clear the
 *         Wake-on-LAN related EMAC/RPM configuration.
 * PCI_D3hot: if WoL is enabled, configure the MAC for magic/ACPI packet
 *         reception, notify the firmware, then drop into D3hot.
 * Returns 0 on success, -EINVAL for any other state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Clear the power-state field and any pending PME status. */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Clear latched wake-packet indications and disable magic
                 * packet mode.
                 */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* On copper, temporarily renegotiate down to
                         * 10/100 for the low-power link, then restore the
                         * user's autoneg/advertising settings.
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Accept broadcast and multicast while asleep. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the bootcode we are suspending, with/without WoL. */
                if (!(bp->flags & BNX2_FLAG_NO_WOL))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
                                     1, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        /* 5706 A0/A1 are only put in D3hot (state 3) when
                         * WoL is armed — presumably a workaround for
                         * those early steppings; verify against errata.
                         */
                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
3995
3996 static int
3997 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3998 {
3999         u32 val;
4000         int j;
4001
4002         /* Request access to the flash interface. */
4003         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4004         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4005                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4006                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4007                         break;
4008
4009                 udelay(5);
4010         }
4011
4012         if (j >= NVRAM_TIMEOUT_COUNT)
4013                 return -EBUSY;
4014
4015         return 0;
4016 }
4017
4018 static int
4019 bnx2_release_nvram_lock(struct bnx2 *bp)
4020 {
4021         int j;
4022         u32 val;
4023
4024         /* Relinquish nvram interface. */
4025         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4026
4027         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4028                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4029                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4030                         break;
4031
4032                 udelay(5);
4033         }
4034
4035         if (j >= NVRAM_TIMEOUT_COUNT)
4036                 return -EBUSY;
4037
4038         return 0;
4039 }
4040
4041
4042 static int
4043 bnx2_enable_nvram_write(struct bnx2 *bp)
4044 {
4045         u32 val;
4046
4047         val = REG_RD(bp, BNX2_MISC_CFG);
4048         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4049
4050         if (bp->flash_info->flags & BNX2_NV_WREN) {
4051                 int j;
4052
4053                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4054                 REG_WR(bp, BNX2_NVM_COMMAND,
4055                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4056
4057                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4058                         udelay(5);
4059
4060                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4061                         if (val & BNX2_NVM_COMMAND_DONE)
4062                                 break;
4063                 }
4064
4065                 if (j >= NVRAM_TIMEOUT_COUNT)
4066                         return -EBUSY;
4067         }
4068         return 0;
4069 }
4070
4071 static void
4072 bnx2_disable_nvram_write(struct bnx2 *bp)
4073 {
4074         u32 val;
4075
4076         val = REG_RD(bp, BNX2_MISC_CFG);
4077         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4078 }
4079
4080
4081 static void
4082 bnx2_enable_nvram_access(struct bnx2 *bp)
4083 {
4084         u32 val;
4085
4086         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4087         /* Enable both bits, even on read. */
4088         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4089                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4090 }
4091
4092 static void
4093 bnx2_disable_nvram_access(struct bnx2 *bp)
4094 {
4095         u32 val;
4096
4097         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4098         /* Disable both bits, even after read. */
4099         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4100                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4101                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4102 }
4103
/* Erase the flash page containing @offset.  A no-op for buffered flash
 * parts.  Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4143
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian, as it appears in flash).  @cmd_flags carries first/last
 * command bits supplied by the caller.  Returns 0 on success or -EBUSY
 * on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate an offset of a buffered flash, not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        /* Preserve flash byte order in the output buffer. */
                        __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
                        memcpy(ret_val, &v, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4187
4188
4189 static int
4190 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4191 {
4192         u32 cmd;
4193         __be32 val32;
4194         int j;
4195
4196         /* Build the command word. */
4197         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4198
4199         /* Calculate an offset of a buffered flash, not needed for 5709. */
4200         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4201                 offset = ((offset / bp->flash_info->page_size) <<
4202                           bp->flash_info->page_bits) +
4203                          (offset % bp->flash_info->page_size);
4204         }
4205
4206         /* Need to clear DONE bit separately. */
4207         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4208
4209         memcpy(&val32, val, 4);
4210
4211         /* Write the data. */
4212         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4213
4214         /* Address of the NVRAM to write to. */
4215         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4216
4217         /* Issue the write command. */
4218         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4219
4220         /* Wait for completion. */
4221         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4222                 udelay(5);
4223
4224                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4225                         break;
4226         }
4227         if (j >= NVRAM_TIMEOUT_COUNT)
4228                 return -EBUSY;
4229
4230         return 0;
4231 }
4232
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVRAM interface for it when the
 * hardware has not done so already.  Also determines bp->flash_size.
 * Returns 0 on success, -ENODEV for an unrecognized part, or a
 * negative errno from lock acquisition.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* The 5709 has a single fixed flash spec; no strap decoding. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set means the interface was already reconfigured (e.g.
	 * by the boot code), so match against config1 instead of straps. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to decode against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched: unsupported part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size published in shared firmware config;
	 * fall back to the flash table's total size when unset. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4315
4316 static int
4317 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4318                 int buf_size)
4319 {
4320         int rc = 0;
4321         u32 cmd_flags, offset32, len32, extra;
4322
4323         if (buf_size == 0)
4324                 return 0;
4325
4326         /* Request access to the flash interface. */
4327         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4328                 return rc;
4329
4330         /* Enable access to flash interface */
4331         bnx2_enable_nvram_access(bp);
4332
4333         len32 = buf_size;
4334         offset32 = offset;
4335         extra = 0;
4336
4337         cmd_flags = 0;
4338
4339         if (offset32 & 3) {
4340                 u8 buf[4];
4341                 u32 pre_len;
4342
4343                 offset32 &= ~3;
4344                 pre_len = 4 - (offset & 3);
4345
4346                 if (pre_len >= len32) {
4347                         pre_len = len32;
4348                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4349                                     BNX2_NVM_COMMAND_LAST;
4350                 }
4351                 else {
4352                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4353                 }
4354
4355                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4356
4357                 if (rc)
4358                         return rc;
4359
4360                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4361
4362                 offset32 += 4;
4363                 ret_buf += pre_len;
4364                 len32 -= pre_len;
4365         }
4366         if (len32 & 3) {
4367                 extra = 4 - (len32 & 3);
4368                 len32 = (len32 + 4) & ~3;
4369         }
4370
4371         if (len32 == 4) {
4372                 u8 buf[4];
4373
4374                 if (cmd_flags)
4375                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4376                 else
4377                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4378                                     BNX2_NVM_COMMAND_LAST;
4379
4380                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4381
4382                 memcpy(ret_buf, buf, 4 - extra);
4383         }
4384         else if (len32 > 0) {
4385                 u8 buf[4];
4386
4387                 /* Read the first word. */
4388                 if (cmd_flags)
4389                         cmd_flags = 0;
4390                 else
4391                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4392
4393                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4394
4395                 /* Advance to the next dword. */
4396                 offset32 += 4;
4397                 ret_buf += 4;
4398                 len32 -= 4;
4399
4400                 while (len32 > 4 && rc == 0) {
4401                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4402
4403                         /* Advance to the next dword. */
4404                         offset32 += 4;
4405                         ret_buf += 4;
4406                         len32 -= 4;
4407                 }
4408
4409                 if (rc)
4410                         return rc;
4411
4412                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4413                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4414
4415                 memcpy(ret_buf, buf, 4 - extra);
4416         }
4417
4418         /* Disable access to flash interface */
4419         bnx2_disable_nvram_access(bp);
4420
4421         bnx2_release_nvram_lock(bp);
4422
4423         return rc;
4424 }
4425
/* bnx2_nvram_write - write an arbitrary byte range to NVRAM.
 * @bp:       device context
 * @offset:   byte offset into NVRAM (need not be dword aligned)
 * @data_buf: source data
 * @buf_size: number of bytes to write
 *
 * Performs a page-granular read-modify-write: unaligned head/tail
 * bytes are preserved by pre-reading the bordering dwords, and for
 * non-buffered flash each touched page is read, erased, and written
 * back in full.  The NVRAM lock is acquired per page iteration.
 *
 * NOTE(review): the error paths that jump to nvram_write_end from
 * inside the page loop do so while the NVRAM lock is still held and
 * flash access is still enabled -- verify this is intended.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: pre-read the first dword so its leading
	 * bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: pre-read the last dword so its trailing
	 * bytes can be preserved. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build a dword-aligned shadow of the data, merging in the
	 * preserved head/tail bytes. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch page for read-modify-write.
	 * NOTE(review): 264 bytes is presumably the largest page_size in
	 * flash_table -- confirm against the table. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write.
			 * NOTE(review): return value ignored here, unlike
			 * the identical call above -- verify intended. */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4605
/* Read the firmware capability mailbox and record which optional
 * features (VLAN keep, remote PHY) the firmware supports, then ack
 * the accepted capabilities back to the firmware when the netdev is
 * running.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; capabilities are re-derived below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware, VLAN tags can always be kept. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	/* No valid capability signature: nothing more to negotiate. */
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	/* Remote PHY (SerDes only): pick the port type from the
	 * firmware-reported link status. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Ack the accepted capabilities back to the firmware. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4645
/* Program the PCI GRC windows: enable separate-window mode, then map
 * windows 2 and 3 to the MSI-X table and PBA addresses respectively.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4654
/* Soft-reset the chip after handshaking with the firmware.
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware in
 *              both the pre- and post-reset sync messages.
 *
 * Returns 0 on success, -EBUSY if the reset does not complete,
 * -ENODEV on a byte-swap misconfiguration, or an error from the
 * firmware sync / bad-rbuf workaround.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via the MISC command register, then restore
		 * window/swap config through PCI config space. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, re-apply the default link setup. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* GRC windows are lost over a reset; reprogram MSI-X mapping. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4760
/* Bring the chip to an operational state after a reset: program DMA
 * and context configuration, load CPU firmware, set the MAC address,
 * MTU and host-coalescing parameters, then enable the blocks and sync
 * with the firmware.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word-swap settings plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X buses. */
	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* PCI-X: clear the enable-relaxed-ordering bit. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Download firmware to the on-chip CPUs. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	/* Mailbox queue configuration. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are computed from at least the standard MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status/stats block and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: high halfword is the interrupt
	 * value, low halfword the non-interrupt value. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 workaround: do not use the timer-mode bits. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector coalescing config for the additional MSI-X
	 * status blocks (vector 0 was programmed above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	/* Enable the remaining blocks; read back to flush the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4985
4986 static void
4987 bnx2_clear_ring_states(struct bnx2 *bp)
4988 {
4989         struct bnx2_napi *bnapi;
4990         struct bnx2_tx_ring_info *txr;
4991         struct bnx2_rx_ring_info *rxr;
4992         int i;
4993
4994         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4995                 bnapi = &bp->bnx2_napi[i];
4996                 txr = &bnapi->tx_ring;
4997                 rxr = &bnapi->rx_ring;
4998
4999                 txr->tx_cons = 0;
5000                 txr->hw_tx_cons = 0;
5001                 rxr->rx_prod_bseq = 0;
5002                 rxr->rx_prod = 0;
5003                 rxr->rx_cons = 0;
5004                 rxr->rx_pg_prod = 0;
5005                 rxr->rx_pg_cons = 0;
5006         }
5007 }
5008
5009 static void
5010 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5011 {
5012         u32 val, offset0, offset1, offset2, offset3;
5013         u32 cid_addr = GET_CID_ADDR(cid);
5014
5015         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5016                 offset0 = BNX2_L2CTX_TYPE_XI;
5017                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5018                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5019                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5020         } else {
5021                 offset0 = BNX2_L2CTX_TYPE;
5022                 offset1 = BNX2_L2CTX_CMD_TYPE;
5023                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5024                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5025         }
5026         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5027         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5028
5029         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5030         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5031
5032         val = (u64) txr->tx_desc_mapping >> 32;
5033         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5034
5035         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5036         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5037 }
5038
5039 static void
5040 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5041 {
5042         struct tx_bd *txbd;
5043         u32 cid = TX_CID;
5044         struct bnx2_napi *bnapi;
5045         struct bnx2_tx_ring_info *txr;
5046
5047         bnapi = &bp->bnx2_napi[ring_num];
5048         txr = &bnapi->tx_ring;
5049
5050         if (ring_num == 0)
5051                 cid = TX_CID;
5052         else
5053                 cid = TX_TSS_CID + ring_num - 1;
5054
5055         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5056
5057         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5058
5059         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5060         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5061
5062         txr->tx_prod = 0;
5063         txr->tx_prod_bseq = 0;
5064
5065         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5066         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5067
5068         bnx2_init_tx_context(bp, cid, txr);
5069 }
5070
5071 static void
5072 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5073                      int num_rings)
5074 {
5075         int i;
5076         struct rx_bd *rxbd;
5077
5078         for (i = 0; i < num_rings; i++) {
5079                 int j;
5080
5081                 rxbd = &rx_ring[i][0];
5082                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5083                         rxbd->rx_bd_len = buf_size;
5084                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5085                 }
5086                 if (i == (num_rings - 1))
5087                         j = 0;
5088                 else
5089                         j = i + 1;
5090                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5091                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5092         }
5093 }
5094
/* Set up one RX ring (and its optional page ring for jumbo frames):
 * initialize the descriptor pages, program the chip's RX context,
 * pre-fill the rings with buffers, and publish the initial producer
 * indices to the hardware.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; extra (RSS) rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Clear the page-buffer size first; it is rewritten below when
	 * the page ring is in use.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo mode: payload pages live in a separate ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Tell the chip where the first RX descriptor page lives. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the main RX ring with skbs; stop early on failure. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for the fast path to ring the RX doorbell. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5174
/* Initialize every TX and RX ring and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS scheduling over the additional TX rings. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table one byte per entry and
		 * flush each group of 4 as one big-endian word; entry
		 * values cycle over num_rx_rings - 1 values.
		 * NOTE(review): the flush is written at offset +i (the
		 * index of the group's LAST byte) — presumably what the
		 * RX processor firmware expects; verify against its spec.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5219
5220 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5221 {
5222         u32 max, num_rings = 1;
5223
5224         while (ring_size > MAX_RX_DESC_CNT) {
5225                 ring_size -= MAX_RX_DESC_CNT;
5226                 num_rings++;
5227         }
5228         /* round to next power of 2 */
5229         max = max_size;
5230         while ((max & num_rings) == 0)
5231                 max >>= 1;
5232
5233         if (num_rings != max)
5234                 max <<= 1;
5235
5236         return max;
5237 }
5238
5239 static void
5240 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5241 {
5242         u32 rx_size, rx_space, jumbo_size;
5243
5244         /* 8 for CRC and VLAN */
5245         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5246
5247         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5248                 sizeof(struct skb_shared_info);
5249
5250         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5251         bp->rx_pg_ring_size = 0;
5252         bp->rx_max_pg_ring = 0;
5253         bp->rx_max_pg_ring_idx = 0;
5254         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5255                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5256
5257                 jumbo_size = size * pages;
5258                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5259                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5260
5261                 bp->rx_pg_ring_size = jumbo_size;
5262                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5263                                                         MAX_RX_PG_RINGS);
5264                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5265                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5266                 bp->rx_copy_thresh = 0;
5267         }
5268
5269         bp->rx_buf_use_size = rx_size;
5270         /* hw alignment */
5271         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5272         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5273         bp->rx_ring_size = size;
5274         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5275         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5276 }
5277
5278 static void
5279 bnx2_free_tx_skbs(struct bnx2 *bp)
5280 {
5281         int i;
5282
5283         for (i = 0; i < bp->num_tx_rings; i++) {
5284                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5285                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5286                 int j;
5287
5288                 if (txr->tx_buf_ring == NULL)
5289                         continue;
5290
5291                 for (j = 0; j < TX_DESC_CNT; ) {
5292                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5293                         struct sk_buff *skb = tx_buf->skb;
5294
5295                         if (skb == NULL) {
5296                                 j++;
5297                                 continue;
5298                         }
5299
5300                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5301
5302                         tx_buf->skb = NULL;
5303
5304                         j += skb_shinfo(skb)->nr_frags + 1;
5305                         dev_kfree_skb(skb);
5306                 }
5307         }
5308 }
5309
5310 static void
5311 bnx2_free_rx_skbs(struct bnx2 *bp)
5312 {
5313         int i;
5314
5315         for (i = 0; i < bp->num_rx_rings; i++) {
5316                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5317                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5318                 int j;
5319
5320                 if (rxr->rx_buf_ring == NULL)
5321                         return;
5322
5323                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5324                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5325                         struct sk_buff *skb = rx_buf->skb;
5326
5327                         if (skb == NULL)
5328                                 continue;
5329
5330                         pci_unmap_single(bp->pdev,
5331                                          pci_unmap_addr(rx_buf, mapping),
5332                                          bp->rx_buf_use_size,
5333                                          PCI_DMA_FROMDEVICE);
5334
5335                         rx_buf->skb = NULL;
5336
5337                         dev_kfree_skb(skb);
5338                 }
5339                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5340                         bnx2_free_rx_page(bp, rxr, j);
5341         }
5342 }
5343
/* Free all TX and RX buffers held by the driver; used around chip
 * resets when the rings are repopulated from scratch.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5350
5351 static int
5352 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5353 {
5354         int rc;
5355
5356         rc = bnx2_reset_chip(bp, reset_code);
5357         bnx2_free_skbs(bp);
5358         if (rc)
5359                 return rc;
5360
5361         if ((rc = bnx2_init_chip(bp)) != 0)
5362                 return rc;
5363
5364         bnx2_init_all_rings(bp);
5365         return 0;
5366 }
5367
5368 static int
5369 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5370 {
5371         int rc;
5372
5373         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5374                 return rc;
5375
5376         spin_lock_bh(&bp->phy_lock);
5377         bnx2_init_phy(bp, reset_phy);
5378         bnx2_set_link(bp);
5379         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5380                 bnx2_remote_phy_event(bp);
5381         spin_unlock_bh(&bp->phy_lock);
5382         return 0;
5383 }
5384
5385 static int
5386 bnx2_shutdown_chip(struct bnx2 *bp)
5387 {
5388         u32 reset_code;
5389
5390         if (bp->flags & BNX2_FLAG_NO_WOL)
5391                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5392         else if (bp->wol)
5393                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5394         else
5395                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5396
5397         return bnx2_reset_chip(bp, reset_code);
5398 }
5399
/* Self-test: verify that selected registers behave as documented.
 *
 * For each table entry, writing 0 must leave the read/write bits
 * (rw_mask) clear, writing all-ones must set them, and the read-only
 * bits (ro_mask) must be unaffected by either write.  The original
 * register value is restored afterwards.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, rw_mask, ro_mask }; offset 0xffff terminates. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Entries flagged NOT_5709 are skipped on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write 1s: rw bits must read 1, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value before moving on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5570
5571 static int
5572 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5573 {
5574         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5575                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5576         int i;
5577
5578         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5579                 u32 offset;
5580
5581                 for (offset = 0; offset < size; offset += 4) {
5582
5583                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5584
5585                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5586                                 test_pattern[i]) {
5587                                 return -ENODEV;
5588                         }
5589                 }
5590         }
5591         return 0;
5592 }
5593
5594 static int
5595 bnx2_test_memory(struct bnx2 *bp)
5596 {
5597         int ret = 0;
5598         int i;
5599         static struct mem_entry {
5600                 u32   offset;
5601                 u32   len;
5602         } mem_tbl_5706[] = {
5603                 { 0x60000,  0x4000 },
5604                 { 0xa0000,  0x3000 },
5605                 { 0xe0000,  0x4000 },
5606                 { 0x120000, 0x4000 },
5607                 { 0x1a0000, 0x4000 },
5608                 { 0x160000, 0x4000 },
5609                 { 0xffffffff, 0    },
5610         },
5611         mem_tbl_5709[] = {
5612                 { 0x60000,  0x4000 },
5613                 { 0xa0000,  0x3000 },
5614                 { 0xe0000,  0x4000 },
5615                 { 0x120000, 0x4000 },
5616                 { 0x1a0000, 0x4000 },
5617                 { 0xffffffff, 0    },
5618         };
5619         struct mem_entry *mem_tbl;
5620
5621         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5622                 mem_tbl = mem_tbl_5709;
5623         else
5624                 mem_tbl = mem_tbl_5706;
5625
5626         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5627                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5628                         mem_tbl[i].len)) != 0) {
5629                         return ret;
5630                 }
5631         }
5632
5633         return ret;
5634 }
5635
5636 #define BNX2_MAC_LOOPBACK       0
5637 #define BNX2_PHY_LOOPBACK       1
5638
/* Loopback self-test: transmit one frame in MAC or PHY loopback mode
 * and verify it is received back intact on ring 0.  Returns 0 on
 * success, -EINVAL for an unknown mode, -ENOMEM/-EIO on setup
 * failure, and -ENODEV when the frame is lost or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Both directions run on vector 0's rings here. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* With a remote PHY this test cannot run; return 0 so
		 * it is effectively skipped rather than failed.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our MAC as destination, zeroed
	 * source/type, then a counting byte pattern as payload.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_head;

	/* Force a coalescing pass so the RX consumer index is current
	 * before we snapshot it.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single TX descriptor for the frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Trigger coalescing again to pick up the TX/RX completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The TX consumer must have caught up with our producer. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly num_pkts new packet(s) must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr status header precedes the packet data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error bit fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length (minus 4-byte CRC) and payload must match what we sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5769
5770 #define BNX2_MAC_LOOPBACK_FAILED        1
5771 #define BNX2_PHY_LOOPBACK_FAILED        2
5772 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5773                                          BNX2_PHY_LOOPBACK_FAILED)
5774
5775 static int
5776 bnx2_test_loopback(struct bnx2 *bp)
5777 {
5778         int rc = 0;
5779
5780         if (!netif_running(bp->dev))
5781                 return BNX2_LOOPBACK_FAILED;
5782
5783         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5784         spin_lock_bh(&bp->phy_lock);
5785         bnx2_init_phy(bp, 1);
5786         spin_unlock_bh(&bp->phy_lock);
5787         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5788                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5789         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5790                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5791         return rc;
5792 }
5793
5794 #define NVRAM_SIZE 0x200
5795 #define CRC32_RESIDUAL 0xdebb20e3
5796
5797 static int
5798 bnx2_test_nvram(struct bnx2 *bp)
5799 {
5800         __be32 buf[NVRAM_SIZE / 4];
5801         u8 *data = (u8 *) buf;
5802         int rc = 0;
5803         u32 magic, csum;
5804
5805         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5806                 goto test_nvram_done;
5807
5808         magic = be32_to_cpu(buf[0]);
5809         if (magic != 0x669955aa) {
5810                 rc = -ENODEV;
5811                 goto test_nvram_done;
5812         }
5813
5814         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5815                 goto test_nvram_done;
5816
5817         csum = ether_crc_le(0x100, data);
5818         if (csum != CRC32_RESIDUAL) {
5819                 rc = -ENODEV;
5820                 goto test_nvram_done;
5821         }
5822
5823         csum = ether_crc_le(0x100, data + 0x100);
5824         if (csum != CRC32_RESIDUAL) {
5825                 rc = -ENODEV;
5826         }
5827
5828 test_nvram_done:
5829         return rc;
5830 }
5831
5832 static int
5833 bnx2_test_link(struct bnx2 *bp)
5834 {
5835         u32 bmsr;
5836
5837         if (!netif_running(bp->dev))
5838                 return -ENODEV;
5839
5840         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5841                 if (bp->link_up)
5842                         return 0;
5843                 return -ENODEV;
5844         }
5845         spin_lock_bh(&bp->phy_lock);
5846         bnx2_enable_bmsr1(bp);
5847         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5848         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5849         bnx2_disable_bmsr1(bp);
5850         spin_unlock_bh(&bp->phy_lock);
5851
5852         if (bmsr & BMSR_LSTATUS) {
5853                 return 0;
5854         }
5855         return -ENODEV;
5856 }
5857
5858 static int
5859 bnx2_test_intr(struct bnx2 *bp)
5860 {
5861         int i;
5862         u16 status_idx;
5863
5864         if (!netif_running(bp->dev))
5865                 return -ENODEV;
5866
5867         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5868
5869         /* This register is not touched during run-time. */
5870         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5871         REG_RD(bp, BNX2_HC_COMMAND);
5872
5873         for (i = 0; i < 10; i++) {
5874                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5875                         status_idx) {
5876
5877                         break;
5878                 }
5879
5880                 msleep_interruptible(10);
5881         }
5882         if (i < 10)
5883                 return 0;
5884
5885         return -ENODEV;
5886 }
5887
/* Determining link for parallel detection.
 *
 * Probes the 5706 SerDes PHY through its shadow/expansion registers and
 * returns 1 only when every indicator is consistent with a usable
 * link-partner signal: signal detect asserted, no sync-loss or invalid
 * RUDI, and no CONFIG ordered sets being received.  Returns 0 otherwise,
 * or immediately if parallel detection is disabled for this board.
 *
 * Caller must hold bp->phy_lock (all callers in this file do).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it back. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read AN_DBG twice; presumably the first read clears latched
	 * fault bits so the second reflects current state — TODO confirm
	 * against the PHY datasheet.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern on expansion register 1. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5919
/* Periodic (timer-context) link maintenance for the 5706 SerDes PHY.
 *
 * Implements parallel detection: if autoneg is enabled but link is down
 * and the partner appears to be a forced-1G device, force 1G full duplex;
 * if a forced link later shows autoneg activity from the partner, switch
 * back to autoneg.  Also watches for loss of sync on an up link and
 * forces the link down (then re-evaluates) to recover.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still settling; skip the link
		 * check until the countdown expires.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg is on but link is down: if the partner
			 * looks like a non-autoneg 1G device, force
			 * 1000/full (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection; check (via PHY
		 * regs 0x17/0x15) whether the partner is now sending
		 * autoneg pages — bit 0x20 appears to indicate this;
		 * TODO confirm against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner restarted autoneg: re-enable it here
			 * and leave parallel-detect mode.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG — first read presumably clears
		 * latched status; TODO confirm.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but sync lost: force it down
			 * once, then on the next pass re-run set_link to
			 * recover.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5981
/* Periodic (timer-context) link maintenance for the 5708 SerDes PHY.
 *
 * When autoneg fails to bring the link up, alternates between forced
 * 2.5G mode and normal autoneg until a link is established.  Does
 * nothing for remote-PHY-capable devices, or for PHYs without 2.5G
 * capability (where only the autoneg countdown is cleared).
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previous autoneg attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg didn't get a link; try forced 2.5G and
			 * shorten the timer for the next check.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G didn't work either; go back to
			 * autoneg and give it two timer ticks to settle.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6014
/* Driver heartbeat timer callback (runs every bp->current_interval
 * jiffies while the interface is up).
 *
 * Sends the firmware heartbeat, refreshes the firmware RX-drop counter,
 * works around chips with broken statistics, checks for missed MSI
 * interrupts, and drives the per-chip SerDes link state machines.
 * Re-arms itself unless the device has been taken down.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are globally disabled (e.g. during reset); skip the
	 * work this round but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Missed-MSI workaround applies only to plain MSI, not the
	 * one-shot MSI variant.
	 */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* This counter lives in firmware, not in the DMA'ed stats block,
	 * so it is polled into the stats block here.
	 */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6050
6051 static int
6052 bnx2_request_irq(struct bnx2 *bp)
6053 {
6054         unsigned long flags;
6055         struct bnx2_irq *irq;
6056         int rc = 0, i;
6057
6058         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6059                 flags = 0;
6060         else
6061                 flags = IRQF_SHARED;
6062
6063         for (i = 0; i < bp->irq_nvecs; i++) {
6064                 irq = &bp->irq_tbl[i];
6065                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6066                                  &bp->bnx2_napi[i]);
6067                 if (rc)
6068                  &