drivers/net: Convert unbounded kzalloc calls to kcalloc
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.0.17"
62 #define DRV_MODULE_RELDATE      "July 18, 2010"
63 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j15.fw"
66 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
74 static char version[] __devinitdata =
75         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87 static int disable_msi = 0;
88
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
92 typedef enum {
93         BCM5706 = 0,
94         NC370T,
95         NC370I,
96         BCM5706S,
97         NC370F,
98         BCM5708,
99         BCM5708S,
100         BCM5709,
101         BCM5709S,
102         BCM5716,
103         BCM5716S,
104 } board_t;
105
106 /* indexed by board_t, above */
107 static struct {
108         char *name;
109 } board_info[] __devinitdata = {
110         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111         { "HP NC370T Multifunction Gigabit Server Adapter" },
112         { "HP NC370i Multifunction Gigabit Server Adapter" },
113         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114         { "HP NC370F Multifunction Gigabit Server Adapter" },
115         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
118         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119         { "Broadcom NetXtreme II BCM5716 1000Base-T" },
120         { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121         };
122
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142         { PCI_VENDOR_ID_BROADCOM, 0x163b,
143           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144         { PCI_VENDOR_ID_BROADCOM, 0x163c,
145           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146         { 0, }
147 };
148
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
153         /* Slow EEPROM */
154         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157          "EEPROM - slow"},
158         /* Expansion entry 0001 */
159         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162          "Entry 0001"},
163         /* Saifun SA25F010 (non-buffered flash) */
164         /* strap, cfg1, & write1 need updates */
165         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168          "Non-buffered flash (128kB)"},
169         /* Saifun SA25F020 (non-buffered flash) */
170         /* strap, cfg1, & write1 need updates */
171         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174          "Non-buffered flash (256kB)"},
175         /* Expansion entry 0100 */
176         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179          "Entry 0100"},
180         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190         /* Saifun SA25F005 (non-buffered flash) */
191         /* strap, cfg1, & write1 need updates */
192         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195          "Non-buffered flash (64kB)"},
196         /* Fast EEPROM */
197         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200          "EEPROM - fast"},
201         /* Expansion entry 1001 */
202         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205          "Entry 1001"},
206         /* Expansion entry 1010 */
207         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210          "Entry 1010"},
211         /* ATMEL AT45DB011B (buffered flash) */
212         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215          "Buffered flash (128kB)"},
216         /* Expansion entry 1100 */
217         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220          "Entry 1100"},
221         /* Expansion entry 1101 */
222         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225          "Entry 1101"},
226         /* Ateml Expansion entry 1110 */
227         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230          "Entry 1110 (Atmel)"},
231         /* ATMEL AT45DB021B (buffered flash) */
232         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235          "Buffered flash (256kB)"},
236 };
237
238 static const struct flash_spec flash_5709 = {
239         .flags          = BNX2_NV_BUFFERED,
240         .page_bits      = BCM5709_FLASH_PAGE_BITS,
241         .page_size      = BCM5709_FLASH_PAGE_SIZE,
242         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
243         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
244         .name           = "5709 Buffered flash (256kB)",
245 };
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254         u32 diff;
255
256         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
257         barrier();
258
259         /* The ring uses 256 indices for 255 entries, one of them
260          * needs to be skipped.
261          */
262         diff = txr->tx_prod - txr->tx_cons;
263         if (unlikely(diff >= TX_DESC_CNT)) {
264                 diff &= 0xffff;
265                 if (diff == TX_DESC_CNT)
266                         diff = MAX_TX_DESC_CNT;
267         }
268         return (bp->tx_ring_size - diff);
269 }
270
/* Read a register indirectly through the PCI config register window.
 * indirect_lock serializes the two-step address/data access so
 * concurrent indirect accesses cannot interleave.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
282
/* Write a register indirectly through the PCI config register window;
 * counterpart of bnx2_reg_rd_ind(), protected by the same lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
291
/* Write a word into the firmware shared-memory region (offset is
 * relative to shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
297
298 static u32
299 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300 {
301         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
302 }
303
/* Write a 32-bit value into on-chip context memory for a given CID.
 * The 5709 uses the CTX_CTX_DATA/CTX_CTX_CTRL interface and the write
 * completion is polled (up to 5 iterations of 5us); earlier chips use
 * the simpler CTX_DATA_ADR/CTX_DATA pair.  indirect_lock serializes
 * the multi-register sequence.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Poll until the chip clears the WRITE_REQ bit. */
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
327
328 #ifdef BCM_CNIC
/* Control hook exported to the cnic offload driver: performs indirect
 * register reads/writes and context-memory writes on its behalf.
 * Returns 0 on success, -EINVAL for an unknown command.
 */
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct drv_ctl_io *io = &info->data.io;

        switch (info->cmd) {
        case DRV_CTL_IO_WR_CMD:
                bnx2_reg_wr_ind(bp, io->offset, io->data);
                break;
        case DRV_CTL_IO_RD_CMD:
                io->data = bnx2_reg_rd_ind(bp, io->offset);
                break;
        case DRV_CTL_CTX_WR_CMD:
                bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
350
/* Fill in the IRQ/status-block information handed to the cnic driver.
 * With MSI-X, cnic gets its own vector (the one after the last net
 * vector) and its own MSI-X status block; otherwise it shares vector 0
 * and polls via cnic_tag/cnic_present on the base status block.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        int sb_id;

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_present = 0;
                sb_id = bp->irq_nvecs;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_tag = bnapi->last_status_idx;
                bnapi->cnic_present = 1;
                sb_id = 0;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }

        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
        /* Status blocks are laid out contiguously at MSIX alignment. */
        cp->irq_arr[0].status_blk = (void *)
                ((unsigned long) bnapi->status_blk.msi +
                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
        cp->irq_arr[0].status_blk_num = sb_id;
        cp->num_irq = 1;
}
377
/* Register the cnic driver's ops with this device.  Returns -EINVAL
 * for a NULL ops pointer, -EBUSY if a cnic driver is already
 * registered, 0 on success.  cnic_ops is published with
 * rcu_assign_pointer so readers see a fully-initialized pointer.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                              void *data)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (cp->drv_state & CNIC_DRV_STATE_REGD)
                return -EBUSY;

        bp->cnic_data = data;
        rcu_assign_pointer(bp->cnic_ops, ops);

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2_setup_cnic_irq_info(bp);

        return 0;
}
400
/* Unregister the cnic driver.  cnic_ops is cleared under cnic_lock and
 * synchronize_rcu() then waits for any in-flight RCU readers of
 * cnic_ops to finish before the caller may free the ops.  Always
 * returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}
415
416 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
417 {
418         struct bnx2 *bp = netdev_priv(dev);
419         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
420
421         cp->drv_owner = THIS_MODULE;
422         cp->chip_id = bp->chip_id;
423         cp->pdev = bp->pdev;
424         cp->io_base = bp->regview;
425         cp->drv_ctl = bnx2_drv_ctl;
426         cp->drv_register_cnic = bnx2_register_cnic;
427         cp->drv_unregister_cnic = bnx2_unregister_cnic;
428
429         return cp;
430 }
431 EXPORT_SYMBOL(bnx2_cnic_probe);
432
433 static void
434 bnx2_cnic_stop(struct bnx2 *bp)
435 {
436         struct cnic_ops *c_ops;
437         struct cnic_ctl_info info;
438
439         mutex_lock(&bp->cnic_lock);
440         c_ops = bp->cnic_ops;
441         if (c_ops) {
442                 info.cmd = CNIC_CTL_STOP_CMD;
443                 c_ops->cnic_ctl(bp->cnic_data, &info);
444         }
445         mutex_unlock(&bp->cnic_lock);
446 }
447
/* Tell a registered cnic driver (if any) to start.  In non-MSI-X mode
 * the shared status block is re-synced by refreshing cnic_tag first.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = bp->cnic_ops;
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

                        bnapi->cnic_tag = bnapi->last_status_idx;
                }
                info.cmd = CNIC_CTL_START_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
467
468 #else
469
/* Stubs used when CNIC support is not compiled in (!BCM_CNIC). */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
479
480 #endif
481
/* Read a PHY register over MDIO.  If the EMAC is auto-polling the PHY,
 * auto-poll is temporarily disabled around the manual access and
 * restored afterward.  The command is polled for completion for up to
 * 50 * 10us.  On success *val holds the register data and 0 is
 * returned; on timeout *val is zeroed and -EBUSY is returned.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Issue the MDIO read command: PHY address, register, READ. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if it was enabled on entry. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
538
/* Write a PHY register over MDIO.  Mirrors bnx2_read_phy(): auto-poll
 * is suspended around the manual access and the command is polled for
 * completion (up to 50 * 10us).  Returns 0 on success or -EBUSY on
 * timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Issue the MDIO write command with the data in the low bits. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if it was enabled on entry. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
587
/* Mask interrupts on every vector.  The final read flushes the posted
 * writes to the chip.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        }
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
601
/* Unmask interrupts on every vector, acking up to last_status_idx.
 * The first write acks with interrupts still masked, the second
 * unmasks; COAL_NOW then forces the chip to generate an interrupt if
 * events are already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
622
/* Disable interrupts and wait for in-flight handlers to complete.
 * intr_sem is bumped first so any handler that does run becomes a
 * no-op; paired with the atomic_dec_and_test in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
636
/* Disable NAPI polling on every vector (blocks until polls finish). */
static void
bnx2_napi_disable(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->irq_nvecs; i++)
                napi_disable(&bp->bnx2_napi[i].napi);
}
645
/* Re-enable NAPI polling on every vector. */
static void
bnx2_napi_enable(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->irq_nvecs; i++)
                napi_enable(&bp->bnx2_napi[i].napi);
}
654
/* Quiesce the interface: optionally stop cnic, then NAPI and the TX
 * queues, then interrupts.  Carrier is forced off so the watchdog does
 * not declare a TX timeout while the device is quiesced.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
        if (stop_cnic)
                bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
        }
        bnx2_disable_int_sync(bp);
        netif_carrier_off(bp->dev);     /* prevent tx timeout */
}
667
/* Undo bnx2_netif_stop().  intr_sem nests: the interface only restarts
 * when the last outstanding stop is released (atomic_dec_and_test).
 * Carrier is restored under phy_lock based on the current link state.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        spin_lock_bh(&bp->phy_lock);
                        if (bp->link_up)
                                netif_carrier_on(bp->dev);
                        spin_unlock_bh(&bp->phy_lock);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                        if (start_cnic)
                                bnx2_cnic_start(bp);
                }
        }
}
685
/* Free the hardware (coherent DMA) and software TX rings of every TX
 * queue.  Safe to call on partially-allocated state: NULL entries are
 * skipped and pointers are cleared after freeing.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                if (txr->tx_desc_ring) {
                        dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
                                          txr->tx_desc_ring,
                                          txr->tx_desc_mapping);
                        txr->tx_desc_ring = NULL;
                }
                kfree(txr->tx_buf_ring);
                txr->tx_buf_ring = NULL;
        }
}
705
/* Free the RX descriptor pages (coherent DMA), the RX page rings, and
 * the vmalloc'ed software rings for every RX queue.  Safe on
 * partially-allocated state; all pointers are re-set to NULL.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                for (j = 0; j < bp->rx_max_ring; j++) {
                        if (rxr->rx_desc_ring[j])
                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                                  rxr->rx_desc_ring[j],
                                                  rxr->rx_desc_mapping[j]);
                        rxr->rx_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_buf_ring);
                rxr->rx_buf_ring = NULL;

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        if (rxr->rx_pg_desc_ring[j])
                                dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
                                                  rxr->rx_pg_desc_ring[j],
                                                  rxr->rx_pg_desc_mapping[j]);
                        rxr->rx_pg_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_pg_ring);
                rxr->rx_pg_ring = NULL;
        }
}
737
738 static int
739 bnx2_alloc_tx_mem(struct bnx2 *bp)
740 {
741         int i;
742
743         for (i = 0; i < bp->num_tx_rings; i++) {
744                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
745                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
746
747                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
748                 if (txr->tx_buf_ring == NULL)
749                         return -ENOMEM;
750
751                 txr->tx_desc_ring =
752                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
753                                            &txr->tx_desc_mapping, GFP_KERNEL);
754                 if (txr->tx_desc_ring == NULL)
755                         return -ENOMEM;
756         }
757         return 0;
758 }
759
/* Allocate the RX rings for every RX queue: vmalloc'ed (potentially
 * large) software buffer rings, zeroed with memset, plus one coherent
 * DMA page per hardware descriptor ring.  The page ring is allocated
 * only when jumbo/page mode is in use (rx_pg_ring_size != 0).
 * Returns 0 or -ENOMEM; partial allocations are left for the caller
 * to unwind via bnx2_free_rx_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                memset(rxr->rx_buf_ring, 0,
                       SW_RXBD_RING_SIZE * bp->rx_max_ring);

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                dma_alloc_coherent(&bp->pdev->dev,
                                                   RXBD_RING_SIZE,
                                                   &rxr->rx_desc_mapping[j],
                                                   GFP_KERNEL);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;

                }

                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                               bp->rx_max_pg_ring);
                }

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                dma_alloc_coherent(&bp->pdev->dev,
                                                   RXBD_RING_SIZE,
                                                   &rxr->rx_pg_desc_mapping[j],
                                                   GFP_KERNEL);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;

                }
        }
        return 0;
}
812
/* Free all device memory: TX/RX rings, the 5709 context pages, and the
 * combined status + statistics block (stats_blk points into the same
 * allocation as status_blk.msi, so only one dma_free_coherent is
 * needed for both).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
                                          bp->ctx_blk[i],
                                          bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bnapi->status_blk.msi) {
                dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
                                  bnapi->status_blk.msi,
                                  bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
838
/* Allocate all DMA memory needed by the device.
 *
 * The status block and the statistics block are combined into a single
 * coherent allocation: the (cache-aligned) status block area first,
 * then the statistics block.  With MSI-X the status area is sized to
 * hold one aligned slot per hardware vector.  Also allocates the 5709
 * on-host context pages and the rx/tx ring memory.
 *
 * Returns 0 on success or -ENOMEM; on failure any partial allocations
 * are released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status-block slot per MSI-X hardware vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Point each additional vector's napi context at its own
		 * slot inside the status-block area.
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Interrupt number field used when programming the
			 * HC for this vector.
			 */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives immediately after the status block(s)
	 * in the same allocation.
	 */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 context memory is backed by host pages (8KB total). */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
916
/* Report the current link speed/duplex/autoneg state to the bootcode
 * through the BNX2_LINK_STATUS shared-memory word so management
 * firmware stays in sync.  Skipped when the PHY is owned by remote
 * (firmware-controlled) PHY handling.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches link-down events; read twice so the
			 * second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
975
976 static char *
977 bnx2_xceiver_str(struct bnx2 *bp)
978 {
979         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
980                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
981                  "Copper"));
982 }
983
/* Log the new link state to the console and update the net-device
 * carrier, then propagate the state to the bootcode.
 *
 * The netdev_info() format deliberately omits a trailing newline so
 * the flow-control details can be appended with pr_cont() before the
 * line is terminated.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		/* Append ", receive ", "& transmit " or ", transmit "
		 * depending on which pause directions are active.
		 */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1014
/* Resolve the tx/rx pause (flow control) configuration after a link
 * change, following the IEEE 802.3 pause resolution rules.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Without full speed + flow-control autoneg, apply the forced
	 * setting (pause is only meaningful at full duplex).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * directly in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* For 1000BASE-X, translate the SerDes pause bits onto the
	 * copper (PAUSE_CAP/PAUSE_ASYM) positions so the resolution
	 * logic below is shared.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1090
/* Record link speed and duplex after a 5709 SerDes PHY links up.
 *
 * The resolved speed/duplex live in the GP_STATUS shadow block, so the
 * PHY block address is switched there for the read and restored to
 * COMBO_IEEEB0 afterwards.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* With forced speed, report the requested settings directly. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1129
1130 static int
1131 bnx2_5708s_linkup(struct bnx2 *bp)
1132 {
1133         u32 val;
1134
1135         bp->link_up = 1;
1136         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1137         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1138                 case BCM5708S_1000X_STAT1_SPEED_10:
1139                         bp->line_speed = SPEED_10;
1140                         break;
1141                 case BCM5708S_1000X_STAT1_SPEED_100:
1142                         bp->line_speed = SPEED_100;
1143                         break;
1144                 case BCM5708S_1000X_STAT1_SPEED_1G:
1145                         bp->line_speed = SPEED_1000;
1146                         break;
1147                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1148                         bp->line_speed = SPEED_2500;
1149                         break;
1150         }
1151         if (val & BCM5708S_1000X_STAT1_FD)
1152                 bp->duplex = DUPLEX_FULL;
1153         else
1154                 bp->duplex = DUPLEX_HALF;
1155
1156         return 0;
1157 }
1158
1159 static int
1160 bnx2_5706s_linkup(struct bnx2 *bp)
1161 {
1162         u32 bmcr, local_adv, remote_adv, common;
1163
1164         bp->link_up = 1;
1165         bp->line_speed = SPEED_1000;
1166
1167         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1168         if (bmcr & BMCR_FULLDPLX) {
1169                 bp->duplex = DUPLEX_FULL;
1170         }
1171         else {
1172                 bp->duplex = DUPLEX_HALF;
1173         }
1174
1175         if (!(bmcr & BMCR_ANENABLE)) {
1176                 return 0;
1177         }
1178
1179         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1180         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1181
1182         common = local_adv & remote_adv;
1183         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1184
1185                 if (common & ADVERTISE_1000XFULL) {
1186                         bp->duplex = DUPLEX_FULL;
1187                 }
1188                 else {
1189                         bp->duplex = DUPLEX_HALF;
1190                 }
1191         }
1192
1193         return 0;
1194 }
1195
/* Record link speed and duplex after a copper PHY links up.
 *
 * With autoneg enabled, the 1000 Mbps result is resolved first from
 * MII_CTRL1000/MII_STAT1000, then 100/10 Mbps from the standard
 * advertisement registers.  With autoneg disabled, BMCR holds the
 * forced settings.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The link-partner 1000BASE-T ability bits in STAT1000 sit
		 * two bit positions above the corresponding advertisement
		 * bits in CTRL1000; shift to line them up.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common 1000 Mbps ability; fall back to the
			 * 10/100 advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all: treat as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: speed/duplex come straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1261
/* Program the L2 context for one rx ring.
 *
 * On the 5709 this also derives the rx flow-control watermarks from
 * the ring size: the low watermark is only armed when tx pause is
 * enabled, and the high watermark is capped at a quarter of the ring
 * (but no more than 16 above the low mark).  Both are then scaled to
 * the hardware's units and clamped to the 4-bit field.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 presumably selects a context sub-type
	 * or BD-chain parameter -- confirm against the 5709 context
	 * layout documentation.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* A low mark at or above the ring size can never be
		 * reached; disable it.
		 */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi watermark is a 4-bit field; if it scales to zero,
		 * the low mark is meaningless too.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1297
1298 static void
1299 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1300 {
1301         int i;
1302         u32 cid;
1303
1304         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1305                 if (i == 1)
1306                         cid = RX_RSS_CID;
1307                 bnx2_init_rx_context(bp, cid);
1308         }
1309 }
1310
/* Program the EMAC to match the current link state: inter-frame/slot
 * timing, port mode (MII vs GMII, 10M and 2.5G variants), duplex, and
 * rx/tx pause enables.  Finishes by acknowledging the link-change
 * interrupt, and on the 5709 re-programs the rx contexts since their
 * watermarks depend on the (possibly changed) flow-control state.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 is the default TX_LENGTHS value; 0x26ff
	 * presumably extends the slot time for 1G half duplex -- confirm
	 * against the chip documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips after the 5706 have a distinct
				 * 10M MII mode; the 5706 uses plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* Rx context watermarks depend on flow_ctrl on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1378
1379 static void
1380 bnx2_enable_bmsr1(struct bnx2 *bp)
1381 {
1382         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1383             (CHIP_NUM(bp) == CHIP_NUM_5709))
1384                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1385                                MII_BNX2_BLK_ADDR_GP_STATUS);
1386 }
1387
1388 static void
1389 bnx2_disable_bmsr1(struct bnx2 *bp)
1390 {
1391         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1392             (CHIP_NUM(bp) == CHIP_NUM_5709))
1393                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1394                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1395 }
1396
/* Enable 2.5G advertisement in the PHY's "up1" register.
 *
 * On the 5709 the register is reached through the OVER1G block
 * address, which is restored to COMBO_IEEEB0 afterwards.  Also adds
 * 2500baseX to the software advertising mask when speed autoneg is on.
 *
 * Returns 1 if 2.5G was already advertised (or is being left as-is),
 * 0 if the register had to be modified.  Returns 0 immediately for
 * PHYs without 2.5G capability.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1425
/* Disable 2.5G advertisement in the PHY's "up1" register.
 *
 * Mirror of bnx2_test_and_enable_2g5(): on the 5709 the register is
 * reached through the OVER1G block address, restored afterwards.
 *
 * Returns 1 if the register had to be modified (2.5G was advertised),
 * 0 otherwise, including for PHYs without 2.5G capability.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1451
/* Force the SerDes link to 2.5G on a 2.5G-capable PHY.
 *
 * 5709: set the force-2.5G bits in the SERDES_DIG MISC1 register
 * (via block-address switching), then re-read BMCR.
 * 5708: set the vendor force-2500 bit in BMCR.
 * Other chips: nothing to do.
 *
 * If speed autoneg was enabled, autoneg is turned off in BMCR since
 * the speed is now forced, keeping the requested duplex.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is only used after a successful bnx2_read_phy();
	 * uninitialized_var() silences a false-positive warning.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1495
/* Stop forcing the SerDes link to 2.5G (mirror of
 * bnx2_enable_forced_2g5()).
 *
 * 5709: clear the force bit in SERDES_DIG MISC1; 5708: clear the
 * vendor force-2500 bit in BMCR; other chips: nothing to do.  If
 * speed autoneg is configured, autoneg is re-enabled and restarted.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is only used after a successful bnx2_read_phy();
	 * uninitialized_var() silences a false-positive warning.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1534
/* Force the 5706 SerDes link down (start != 0) or release the forced
 * state (start == 0) by rewriting the expansion SERDES_CTL register
 * through the DSP address/data port.
 *
 * NOTE(review): the exact meaning of the 0xff0f mask and the 0xc0 bits
 * is not documented here -- presumably they gate the SerDes transmit/
 * receive path; confirm against the 5706 register documentation.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1547
/* Re-evaluate the PHY link state and reconfigure the MAC accordingly.
 *
 * Reads link status from the PHY, dispatches to the chip-specific
 * linkup handler to latch speed/duplex, resolves flow control, logs
 * any link transition, and finally programs the EMAC.  Called with
 * bp->phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware owns the PHY in remote-PHY mode. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down events; read twice (with the 5709
	 * GP_STATUS shadow selected where needed) to get current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link state from the EMAC status
	 * and the AN debug shadow (NOSYNC) instead of trusting BMSR.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Latch speed/duplex via the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: drop any forced 2.5G setting so autoneg
		 * can try all speeds again.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode by re-enabling autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only report transitions, not every poll. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1631
1632 static int
1633 bnx2_reset_phy(struct bnx2 *bp)
1634 {
1635         int i;
1636         u32 reg;
1637
1638         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1639
1640 #define PHY_RESET_MAX_WAIT 100
1641         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1642                 udelay(10);
1643
1644                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1645                 if (!(reg & BMCR_RESET)) {
1646                         udelay(20);
1647                         break;
1648                 }
1649         }
1650         if (i == PHY_RESET_MAX_WAIT) {
1651                 return -EBUSY;
1652         }
1653         return 0;
1654 }
1655
1656 static u32
1657 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1658 {
1659         u32 adv = 0;
1660
1661         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1662                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1663
1664                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1665                         adv = ADVERTISE_1000XPAUSE;
1666                 }
1667                 else {
1668                         adv = ADVERTISE_PAUSE_CAP;
1669                 }
1670         }
1671         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1672                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1673                         adv = ADVERTISE_1000XPSE_ASYM;
1674                 }
1675                 else {
1676                         adv = ADVERTISE_PAUSE_ASYM;
1677                 }
1678         }
1679         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1680                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1681                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1682                 }
1683                 else {
1684                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1685                 }
1686         }
1687         return adv;
1688 }
1689
1690 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1691
1692 static int
1693 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1694 __releases(&bp->phy_lock)
1695 __acquires(&bp->phy_lock)
1696 {
1697         u32 speed_arg = 0, pause_adv;
1698
1699         pause_adv = bnx2_phy_get_pause_adv(bp);
1700
1701         if (bp->autoneg & AUTONEG_SPEED) {
1702                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1703                 if (bp->advertising & ADVERTISED_10baseT_Half)
1704                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1705                 if (bp->advertising & ADVERTISED_10baseT_Full)
1706                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1707                 if (bp->advertising & ADVERTISED_100baseT_Half)
1708                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1709                 if (bp->advertising & ADVERTISED_100baseT_Full)
1710                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1711                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1712                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1713                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1714                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1715         } else {
1716                 if (bp->req_line_speed == SPEED_2500)
1717                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1718                 else if (bp->req_line_speed == SPEED_1000)
1719                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1720                 else if (bp->req_line_speed == SPEED_100) {
1721                         if (bp->req_duplex == DUPLEX_FULL)
1722                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1723                         else
1724                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1725                 } else if (bp->req_line_speed == SPEED_10) {
1726                         if (bp->req_duplex == DUPLEX_FULL)
1727                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1728                         else
1729                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1730                 }
1731         }
1732
1733         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1734                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1735         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1736                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1737
1738         if (port == PORT_TP)
1739                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1740                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1741
1742         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1743
1744         spin_unlock_bh(&bp->phy_lock);
1745         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1746         spin_lock_bh(&bp->phy_lock);
1747
1748         return 0;
1749 }
1750
/* Configure the SerDes PHY for the requested (forced or autoneg)
 * link settings.  May drop and re-acquire bp->phy_lock around sleeps.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Remote-PHY capable devices are configured through the
	 * firmware mailbox instead of direct MII writes.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex: program BMCR directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the 2.5G forced mode. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 looks like a
				 * chip-specific BMCR bit -- confirm
				 * against the 5709 datasheet.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; cannot hold phy_lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1867
/* All speeds a fibre (SerDes) interface can advertise; 2.5G is
 * included only when the PHY is 2.5G capable.  NOTE: expands `bp'
 * from the caller's scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All speeds a copper interface can advertise (ethtool bit masks). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks covering 10/100 and 1000 speeds. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1882
1883 static void
1884 bnx2_set_default_remote_link(struct bnx2 *bp)
1885 {
1886         u32 link;
1887
1888         if (bp->phy_port == PORT_TP)
1889                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1890         else
1891                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1892
1893         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1894                 bp->req_line_speed = 0;
1895                 bp->autoneg |= AUTONEG_SPEED;
1896                 bp->advertising = ADVERTISED_Autoneg;
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1898                         bp->advertising |= ADVERTISED_10baseT_Half;
1899                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1900                         bp->advertising |= ADVERTISED_10baseT_Full;
1901                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1902                         bp->advertising |= ADVERTISED_100baseT_Half;
1903                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1904                         bp->advertising |= ADVERTISED_100baseT_Full;
1905                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1906                         bp->advertising |= ADVERTISED_1000baseT_Full;
1907                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1908                         bp->advertising |= ADVERTISED_2500baseX_Full;
1909         } else {
1910                 bp->autoneg = 0;
1911                 bp->advertising = 0;
1912                 bp->req_duplex = DUPLEX_FULL;
1913                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1914                         bp->req_line_speed = SPEED_10;
1915                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1916                                 bp->req_duplex = DUPLEX_HALF;
1917                 }
1918                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1919                         bp->req_line_speed = SPEED_100;
1920                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1921                                 bp->req_duplex = DUPLEX_HALF;
1922                 }
1923                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1924                         bp->req_line_speed = SPEED_1000;
1925                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1926                         bp->req_line_speed = SPEED_2500;
1927         }
1928 }
1929
1930 static void
1931 bnx2_set_default_link(struct bnx2 *bp)
1932 {
1933         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1934                 bnx2_set_default_remote_link(bp);
1935                 return;
1936         }
1937
1938         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1939         bp->req_line_speed = 0;
1940         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1941                 u32 reg;
1942
1943                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1944
1945                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1946                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1947                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1948                         bp->autoneg = 0;
1949                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1950                         bp->req_duplex = DUPLEX_FULL;
1951                 }
1952         } else
1953                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1954 }
1955
/* Bump the driver pulse sequence number and write it to the pulse
 * mailbox so the firmware knows the driver is still alive.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	/* The PCICFG register window is shared; serialize with
	 * indirect_lock.
	 */
	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1969
/* Decode the link status word posted by the firmware for a remote
 * PHY and mirror it into the driver's link state.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The xxHALF cases intentionally fall through to the
		 * matching xxFULL case to pick up the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control was forced by the user. */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			/* Flow control was autonegotiated; take the
			 * result reported by the firmware.
			 */
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Media type changed: re-derive the default link. */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2046
2047 static int
2048 bnx2_set_remote_link(struct bnx2 *bp)
2049 {
2050         u32 evt_code;
2051
2052         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2053         switch (evt_code) {
2054                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2055                         bnx2_remote_phy_event(bp);
2056                         break;
2057                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2058                 default:
2059                         bnx2_send_heart_beat(bp);
2060                         break;
2061         }
2062         return 0;
2063 }
2064
/* Configure the copper PHY for the requested (forced or autoneg)
 * link settings.  May drop and re-acquire bp->phy_lock around sleeps.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: rebuild both advertisement registers and
		 * only restart autoneg when something changed.
		 */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down events; read twice to get
		 * the current link state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; cannot hold phy_lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2163
2164 static int
2165 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166 __releases(&bp->phy_lock)
2167 __acquires(&bp->phy_lock)
2168 {
2169         if (bp->loopback == MAC_LOOPBACK)
2170                 return 0;
2171
2172         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173                 return (bnx2_setup_serdes_phy(bp, port));
2174         }
2175         else {
2176                 return (bnx2_setup_copper_phy(bp));
2177         }
2178 }
2179
/* One-time initialization of the 5709 SerDes PHY. */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* On this PHY the standard MII registers are accessed at an
	 * offset of 0x10; record the per-device register numbers.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and point it at the AN MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* SerDes digital block: disable autodetect, force fiber mode. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Over-1G block: advertise 2.5G only when the PHY supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Clause-73 BAM control. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected for normal operation. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2229
/* One-time initialization of the 5708 SerDes PHY. */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with speed autodetect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from the hardware
	 * config, but only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2287
/* One-time initialization of the 5706 SerDes PHY.
 *
 * NOTE(review): registers 0x18 and 0x1c are Broadcom auxiliary/shadow
 * registers; the exact bit semantics come from the PHY datasheet.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2325
/* One-time initialization of the copper PHY.
 *
 * NOTE(review): registers 0x10, 0x15, 0x17 and 0x18 are Broadcom
 * auxiliary/shadow registers; bit meanings per the PHY datasheet.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-prescribed register sequence for the CRC
		 * workaround on affected PHYs.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expand register 8 to disable
		 * early DAC on affected PHYs.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2377
2378
/* Initialize the PHY: identify it, run the chip/media specific init
 * routine, then program the requested link settings.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	/* Default to link-ready interrupt mode. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Standard MII register layout; the 5709 SerDes init below
	 * replaces these with its own register offsets.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* A remote PHY is owned by the firmware; skip local probing. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY id from the two id registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2424
2425 static int
2426 bnx2_set_mac_loopback(struct bnx2 *bp)
2427 {
2428         u32 mac_mode;
2429
2430         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2431         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2432         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2433         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2434         bp->link_up = 1;
2435         return 0;
2436 }
2437
2438 static int bnx2_test_link(struct bnx2 *);
2439
/* Put the PHY into loopback at 1G full duplex and configure the MAC
 * to pass traffic through it.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll up to ~1 second for the looped-back link to come up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC loopback/force/duplex overrides and select GMII
	 * so traffic goes out through the looped-back PHY.
	 */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2469
/* Send a command to the firmware through the driver mailbox and,
 * when @ack is set, wait for the matching acknowledgement.
 *
 * Returns 0 on success, -EBUSY on ack timeout (after notifying the
 * firmware of the timeout), or -EIO if the firmware reports failure.
 * Sleeps; must not be called with a spinlock held.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next driver sequence number so the
	 * firmware's ack can be matched to this request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages never report a completion status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2514
/* Initialize the 5709 context memory.
 *
 * Starts the chip's internal context memory init, then programs the
 * host page table with the DMA address of each pre-allocated context
 * page in bp->ctx_blk[] (zeroing the page contents first).
 *
 * Returns 0 on success, -EBUSY if the chip does not complete the
 * memory init or a page-table write in time, or -ENOMEM if a context
 * page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode host page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the chip to clear MEM_INIT when init completes. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the page's DMA address (low/high halves) into
		 * host page table entry i and request the write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for the chip to consume the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2562
/* Zero the on-chip context memory for all 96 contexts on pre-5709
 * chips.  On 5706 A0, some virtual CIDs map to different physical
 * CIDs, so the physical address is remapped before zeroing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* Remap CIDs with bit 3 set.  NOTE(review):
			 * remap formula taken as-is; presumably a
			 * 5706 A0 erratum - confirm against the errata.
			 */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context covers CTX_SIZE / PHY_CTX_SIZE pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2605
2606 static int
2607 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2608 {
2609         u16 *good_mbuf;
2610         u32 good_mbuf_cnt;
2611         u32 val;
2612
2613         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2614         if (good_mbuf == NULL) {
2615                 pr_err("Failed to allocate memory in %s\n", __func__);
2616                 return -ENOMEM;
2617         }
2618
2619         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2620                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2621
2622         good_mbuf_cnt = 0;
2623
2624         /* Allocate a bunch of mbufs and save the good ones in an array. */
2625         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2626         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2627                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2628                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2629
2630                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2631
2632                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2633
2634                 /* The addresses with Bit 9 set are bad memory blocks. */
2635                 if (!(val & (1 << 9))) {
2636                         good_mbuf[good_mbuf_cnt] = (u16) val;
2637                         good_mbuf_cnt++;
2638                 }
2639
2640                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2641         }
2642
2643         /* Free the good ones back to the mbuf pool thus discarding
2644          * all the bad ones. */
2645         while (good_mbuf_cnt) {
2646                 good_mbuf_cnt--;
2647
2648                 val = good_mbuf[good_mbuf_cnt];
2649                 val = (val << 9) | val | 1;
2650
2651                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2652         }
2653         kfree(good_mbuf);
2654         return 0;
2655 }
2656
2657 static void
2658 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2659 {
2660         u32 val;
2661
2662         val = (mac_addr[0] << 8) | mac_addr[1];
2663
2664         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2665
2666         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2667                 (mac_addr[4] << 8) | mac_addr[5];
2668
2669         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2670 }
2671
2672 static inline int
2673 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2674 {
2675         dma_addr_t mapping;
2676         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2677         struct rx_bd *rxbd =
2678                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2679         struct page *page = alloc_page(gfp);
2680
2681         if (!page)
2682                 return -ENOMEM;
2683         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2684                                PCI_DMA_FROMDEVICE);
2685         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2686                 __free_page(page);
2687                 return -EIO;
2688         }
2689
2690         rx_pg->page = page;
2691         dma_unmap_addr_set(rx_pg, mapping, mapping);
2692         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2693         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2694         return 0;
2695 }
2696
2697 static void
2698 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2699 {
2700         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2701         struct page *page = rx_pg->page;
2702
2703         if (!page)
2704                 return;
2705
2706         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2707                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2708
2709         __free_page(page);
2710         rx_pg->page = NULL;
2711 }
2712
/* Allocate and DMA-map an skb for rx ring slot @index and point the
 * corresponding rx descriptor at it.  The skb data pointer is first
 * aligned to BNX2_RX_ALIGN.  On success, rxr->rx_prod_bseq is
 * advanced by the buffer size.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, or -EIO
 * if the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to the rx buffer alignment boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* The l2_fhdr the chip DMAs sits at the start of the buffer. */
	rx_buf->skb = skb;
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2748
2749 static int
2750 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2751 {
2752         struct status_block *sblk = bnapi->status_blk.msi;
2753         u32 new_link_state, old_link_state;
2754         int is_set = 1;
2755
2756         new_link_state = sblk->status_attn_bits & event;
2757         old_link_state = sblk->status_attn_bits_ack & event;
2758         if (new_link_state != old_link_state) {
2759                 if (new_link_state)
2760                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2761                 else
2762                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2763         } else
2764                 is_set = 0;
2765
2766         return is_set;
2767 }
2768
/* Handle PHY attention events signalled through the status block:
 * link-state changes and remote-link (timer-abort) events.  Both
 * checks acknowledge the event to the chip as a side effect.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2782
2783 static inline u16
2784 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2785 {
2786         u16 cons;
2787
2788         /* Tell compiler that status block fields can change. */
2789         barrier();
2790         cons = *bnapi->hw_tx_cons_ptr;
2791         barrier();
2792         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2793                 cons++;
2794         return cons;
2795 }
2796
/* Reclaim completed tx descriptors for the tx ring paired with
 * @bnapi: unmap and free up to @budget transmitted skbs, then wake
 * the tx queue if it was stopped and enough room has been freed.
 *
 * Returns the number of packets completed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't reclaim until every BD of the TSO
			 * packet has completed.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment page of the skb. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Caught up with the chip - refresh to see new work. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to close the race with
		 * bnx2_start_xmit() stopping the queue.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2887
/* Recycle @count entries of the rx page ring from the consumer side
 * back to the producer side without allocating new pages.  If @skb is
 * non-NULL, its last fragment page is also handed back to the ring
 * and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last fragment page from the skb. */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	/* Move each consumer-side page (and its DMA mapping and BD
	 * address) to the producer slot so it can be reused.
	 */
	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2943
/* Recycle an rx buffer: hand @skb back to the ring at slot @prod,
 * moving the DMA mapping and BD address over from slot @cons when the
 * two slots differ.  Also advances rx_prod_bseq by the buffer size.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area back to the device; the CPU copy of it
	 * was synced for the CPU in bnx2_rx_int().
	 */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2974
/* Finish an skb for a received packet of @len bytes (length appears
 * to include the 4-byte CRC, which is trimmed here) whose buffer is
 * mapped at @dma_addr.
 *
 * A replacement rx buffer is allocated first; if that fails, the
 * current buffer (and any page-ring pages) are recycled and the error
 * returned.  When @hdr_len is non-zero the packet was split by the
 * chip: the first @hdr_len bytes stay in the linear skb data and the
 * remainder is attached from rx page-ring pages as fragments.
 *
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.  Returns 0 on success.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		/* No replacement buffer - recycle everything. */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Non-split packet: all data is in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* A last fragment of <= 4 bytes is all CRC;
			 * trim it off the skb instead of attaching it.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3074
3075 static inline u16
3076 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3077 {
3078         u16 cons;
3079
3080         /* Tell compiler that status block fields can change. */
3081         barrier();
3082         cons = *bnapi->hw_rx_cons_ptr;
3083         barrier();
3084         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3085                 cons++;
3086         return cons;
3087 }
3088
/* NAPI rx handler: process up to @budget received packets from the rx
 * ring paired with @bnapi.
 *
 * Small packets (len <= rx_copy_thresh) are copied into a fresh skb
 * and the original buffer recycled; larger ones are handed up
 * directly with a replacement buffer allocated by bnx2_rx_skb().
 * Also handles error frames, VLAN tag extraction, checksum-offload
 * results and the RX hash before passing packets to GRO.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* Prefetch the next entry's l2_fhdr for the following
		 * loop iteration.
		 */
		next_rx_buf =
			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
		prefetch(next_rx_buf->desc);

		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU. */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop error frames, recycling their buffers. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Trim the 4-byte CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group: re-insert the tag into
				 * the packet data by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-length frames that are not VLAN-tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&bnapi->napi, skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer indices. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3273
3274 /* MSI ISR - The only difference between this and the INTx ISR
3275  * is that the MSI interrupt is always serviced.
3276  */
/* MSI interrupt handler: ack/mask the interrupt and schedule NAPI.
 * Always returns IRQ_HANDLED (MSI is never shared).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack and mask further interrupts before polling. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3296
/* One-shot MSI handler: the hardware masks the interrupt by itself,
 * so no ack register write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3313
/* INTx ISR; the line may be shared, so the handler must first decide
 * whether this device actually raised the interrupt.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not ours */

	/* Ack and mask interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		/* Remember the index we saw so the poller can detect
		 * events that arrive while it runs.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3352
3353 static inline int
3354 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3355 {
3356         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3357         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3358
3359         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3360             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3361                 return 1;
3362         return 0;
3363 }
3364
3365 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3366                                  STATUS_ATTN_BITS_TIMER_ABORT)
3367
3368 static inline int
3369 bnx2_has_work(struct bnx2_napi *bnapi)
3370 {
3371         struct status_block *sblk = bnapi->status_blk.msi;
3372
3373         if (bnx2_has_fast_work(bnapi))
3374                 return 1;
3375
3376 #ifdef BCM_CNIC
3377         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3378                 return 1;
3379 #endif
3380
3381         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3382             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3383                 return 1;
3384
3385         return 0;
3386 }
3387
/* Idle-time check (presumably run from the driver's periodic timer —
 * caller not visible here) for a lost MSI: if work is pending but the
 * status index has not advanced since the previous check, pulse the
 * MSI enable bit in PCI config space and run the handler by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off and back on, then service
			 * the pending work directly.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3409
#ifdef BCM_CNIC
/* Pass the status block to the registered CNIC offload driver (if
 * any) and record the tag it returns for use by bnx2_has_work().
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops is published via RCU; guard the dereference. */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3426
/* Service unacknowledged link-state / timer-abort attention events,
 * then force an immediate coalesce so the status block is refreshed.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An event is pending when a bit differs from its ack copy. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3446
3447 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3448                           int work_done, int budget)
3449 {
3450         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3451         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3452
3453         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3454                 bnx2_tx_int(bp, bnapi, 0);
3455
3456         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3457                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3458
3459         return work_done;
3460 }
3461
/* NAPI poll handler for MSI-X vectors: fast-path (RX/TX) work only;
 * link and CNIC events are handled by the base vector's bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;	/* budget spent; stay scheduled */

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Idle: complete NAPI and ack/re-enable this
			 * vector up to the index we processed.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3488
/* NAPI poll handler for the single-vector (INTx/MSI) case: handles
 * link attentions, RX/TX rings and, when built in, the CNIC driver.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with the mask bit still set,
			 * then write again without it to re-enable
			 * interrupts (NOTE(review): presumably avoids a
			 * window for a spurious interrupt — confirm).
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3537
3538 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3539  * from set_multicast.
3540  */
3541 static void
3542 bnx2_set_rx_mode(struct net_device *dev)
3543 {
3544         struct bnx2 *bp = netdev_priv(dev);
3545         u32 rx_mode, sort_mode;
3546         struct netdev_hw_addr *ha;
3547         int i;
3548
3549         if (!netif_running(dev))
3550                 return;
3551
3552         spin_lock_bh(&bp->phy_lock);
3553
3554         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3555                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3556         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3557 #ifdef BCM_VLAN
3558         if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3559                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3560 #else
3561         if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3562                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3563 #endif
3564         if (dev->flags & IFF_PROMISC) {
3565                 /* Promiscuous mode. */
3566                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3567                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3568                              BNX2_RPM_SORT_USER0_PROM_VLAN;
3569         }
3570         else if (dev->flags & IFF_ALLMULTI) {
3571                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3572                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3573                                0xffffffff);
3574                 }
3575                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3576         }
3577         else {
3578                 /* Accept one or more multicast(s). */
3579                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3580                 u32 regidx;
3581                 u32 bit;
3582                 u32 crc;
3583
3584                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3585
3586                 netdev_for_each_mc_addr(ha, dev) {
3587                         crc = ether_crc_le(ETH_ALEN, ha->addr);
3588                         bit = crc & 0xff;
3589                         regidx = (bit & 0xe0) >> 5;
3590                         bit &= 0x1f;
3591                         mc_filter[regidx] |= (1 << bit);
3592                 }
3593
3594                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3595                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3596                                mc_filter[i]);
3597                 }
3598
3599                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3600         }
3601
3602         if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3603                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3604                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3605                              BNX2_RPM_SORT_USER0_PROM_VLAN;
3606         } else if (!(dev->flags & IFF_PROMISC)) {
3607                 /* Add all entries into to the match filter list */
3608                 i = 0;
3609                 netdev_for_each_uc_addr(ha, dev) {
3610                         bnx2_set_mac_addr(bp, ha->addr,
3611                                           i + BNX2_START_UNICAST_ADDRESS_INDEX);
3612                         sort_mode |= (1 <<
3613                                       (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3614                         i++;
3615                 }
3616
3617         }
3618
3619         if (rx_mode != bp->rx_mode) {
3620                 bp->rx_mode = rx_mode;
3621                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3622         }
3623
3624         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3625         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3626         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3627
3628         spin_unlock_bh(&bp->phy_lock);
3629 }
3630
3631 static int __devinit
3632 check_fw_section(const struct firmware *fw,
3633                  const struct bnx2_fw_file_section *section,
3634                  u32 alignment, bool non_empty)
3635 {
3636         u32 offset = be32_to_cpu(section->offset);
3637         u32 len = be32_to_cpu(section->len);
3638
3639         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3640                 return -EINVAL;
3641         if ((non_empty && len == 0) || len > fw->size - offset ||
3642             len & (alignment - 1))
3643                 return -EINVAL;
3644         return 0;
3645 }
3646
3647 static int __devinit
3648 check_mips_fw_entry(const struct firmware *fw,
3649                     const struct bnx2_mips_fw_file_entry *entry)
3650 {
3651         if (check_fw_section(fw, &entry->text, 4, true) ||
3652             check_fw_section(fw, &entry->data, 4, false) ||
3653             check_fw_section(fw, &entry->rodata, 4, false))
3654                 return -EINVAL;
3655         return 0;
3656 }
3657
3658 static int __devinit
3659 bnx2_request_firmware(struct bnx2 *bp)
3660 {
3661         const char *mips_fw_file, *rv2p_fw_file;
3662         const struct bnx2_mips_fw_file *mips_fw;
3663         const struct bnx2_rv2p_fw_file *rv2p_fw;
3664         int rc;
3665
3666         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3667                 mips_fw_file = FW_MIPS_FILE_09;
3668                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3669                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3670                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3671                 else
3672                         rv2p_fw_file = FW_RV2P_FILE_09;
3673         } else {
3674                 mips_fw_file = FW_MIPS_FILE_06;
3675                 rv2p_fw_file = FW_RV2P_FILE_06;
3676         }
3677
3678         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3679         if (rc) {
3680                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3681                 return rc;
3682         }
3683
3684         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3685         if (rc) {
3686                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3687                 return rc;
3688         }
3689         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3690         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3691         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3692             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3693             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3694             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3695             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3696             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3697                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3698                 return -EINVAL;
3699         }
3700         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3701             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3702             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3703                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3704                 return -EINVAL;
3705         }
3706
3707         return 0;
3708 }
3709
3710 static u32
3711 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3712 {
3713         switch (idx) {
3714         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3715                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3716                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3717                 break;
3718         }
3719         return rv2p_code;
3720 }
3721
/* Download one RV2P processor's firmware image, apply its fixup
 * table, and reset the processor (it is un-stalled later).
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own command register and RDWR bit. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image one 64-bit instruction (two dwords) at a
	 * time; the ADDR_CMD write commits slot i/8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Second pass: patch the instructions listed in the fixup
	 * table.  Each non-zero fixup entry is a dword index into the
	 * image; the low dword is run through rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3781
3782 static int
3783 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3784             const struct bnx2_mips_fw_file_entry *fw_entry)
3785 {
3786         u32 addr, len, file_offset;
3787         __be32 *data;
3788         u32 offset;
3789         u32 val;
3790
3791         /* Halt the CPU. */
3792         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3793         val |= cpu_reg->mode_value_halt;
3794         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3795         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3796
3797         /* Load the Text area. */
3798         addr = be32_to_cpu(fw_entry->text.addr);
3799         len = be32_to_cpu(fw_entry->text.len);
3800         file_offset = be32_to_cpu(fw_entry->text.offset);
3801         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3802
3803         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3804         if (len) {
3805                 int j;
3806
3807                 for (j = 0; j < (len / 4); j++, offset += 4)
3808                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3809         }
3810
3811         /* Load the Data area. */
3812         addr = be32_to_cpu(fw_entry->data.addr);
3813         len = be32_to_cpu(fw_entry->data.len);
3814         file_offset = be32_to_cpu(fw_entry->data.offset);
3815         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3816
3817         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3818         if (len) {
3819                 int j;
3820
3821                 for (j = 0; j < (len / 4); j++, offset += 4)
3822                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3823         }
3824
3825         /* Load the Read-Only area. */
3826         addr = be32_to_cpu(fw_entry->rodata.addr);
3827         len = be32_to_cpu(fw_entry->rodata.len);
3828         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3829         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3830
3831         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3832         if (len) {
3833                 int j;
3834
3835                 for (j = 0; j < (len / 4); j++, offset += 4)
3836                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3837         }
3838
3839         /* Clear the pre-fetch instruction. */
3840         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3841
3842         val = be32_to_cpu(fw_entry->start_addr);
3843         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3844
3845         /* Start the CPU. */
3846         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3847         val &= ~cpu_reg->mode_value_halt;
3848         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3849         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3850
3851         return 0;
3852 }
3853
3854 static int
3855 bnx2_init_cpus(struct bnx2 *bp)
3856 {
3857         const struct bnx2_mips_fw_file *mips_fw =
3858                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3859         const struct bnx2_rv2p_fw_file *rv2p_fw =
3860                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3861         int rc;
3862
3863         /* Initialize the RV2P processor. */
3864         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3865         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3866
3867         /* Initialize the RX Processor. */
3868         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3869         if (rc)
3870                 goto init_cpu_err;
3871
3872         /* Initialize the TX Processor. */
3873         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3874         if (rc)
3875                 goto init_cpu_err;
3876
3877         /* Initialize the TX Patch-up Processor. */
3878         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3879         if (rc)
3880                 goto init_cpu_err;
3881
3882         /* Initialize the Completion Processor. */
3883         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3884         if (rc)
3885                 goto init_cpu_err;
3886
3887         /* Initialize the Command Processor. */
3888         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3889
3890 init_cpu_err:
3891         return rc;
3892 }
3893
/* Transition the chip between PCI power states.  PCI_D0 restores
 * normal operation; PCI_D3hot arms Wake-on-LAN (when enabled) before
 * powering down.  Any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack magic/ACPI packet events and leave MPKT mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily advertise 10/100 only on copper
			 * while setting up the PHY for WOL, then restore
			 * the user's autoneg settings.  (NOTE(review):
			 * presumably to keep a low-power link — confirm.)
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (with or without
		 * WOL) unless WOL is entirely unsupported.
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter D3hot when WOL is armed.
			 * (NOTE(review): presumably a chip erratum —
			 * confirm against errata docs.)
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4031
4032 static int
4033 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4034 {
4035         u32 val;
4036         int j;
4037
4038         /* Request access to the flash interface. */
4039         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4040         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4041                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4042                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4043                         break;
4044
4045                 udelay(5);
4046         }
4047
4048         if (j >= NVRAM_TIMEOUT_COUNT)
4049                 return -EBUSY;
4050
4051         return 0;
4052 }
4053
4054 static int
4055 bnx2_release_nvram_lock(struct bnx2 *bp)
4056 {
4057         int j;
4058         u32 val;
4059
4060         /* Relinquish nvram interface. */
4061         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4062
4063         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4064                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4065                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4066                         break;
4067
4068                 udelay(5);
4069         }
4070
4071         if (j >= NVRAM_TIMEOUT_COUNT)
4072                 return -EBUSY;
4073
4074         return 0;
4075 }
4076
4077
/* Enable write access to the flash part.  For parts flagged with
 * BNX2_NV_WREN, a WREN command must also be issued and polled for
 * completion.  Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE first, then issue the WREN command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		/* Poll for the DONE bit, delaying before each read. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4106
4107 static void
4108 bnx2_disable_nvram_write(struct bnx2 *bp)
4109 {
4110         u32 val;
4111
4112         val = REG_RD(bp, BNX2_MISC_CFG);
4113         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4114 }
4115
4116
4117 static void
4118 bnx2_enable_nvram_access(struct bnx2 *bp)
4119 {
4120         u32 val;
4121
4122         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4123         /* Enable both bits, even on read. */
4124         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4125                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4126 }
4127
4128 static void
4129 bnx2_disable_nvram_access(struct bnx2 *bp)
4130 {
4131         u32 val;
4132
4133         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4134         /* Disable both bits, even after read. */
4135         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4136                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4137                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4138 }
4139
4140 static int
4141 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4142 {
4143         u32 cmd;
4144         int j;
4145
4146         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4147                 /* Buffered flash, no erase needed */
4148                 return 0;
4149
4150         /* Build an erase command */
4151         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4152               BNX2_NVM_COMMAND_DOIT;
4153
4154         /* Need to clear DONE bit separately. */
4155         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4156
4157         /* Address of the NVRAM to read from. */
4158         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4159
4160         /* Issue an erase command. */
4161         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4162
4163         /* Wait for completion. */
4164         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4165                 u32 val;
4166
4167                 udelay(5);
4168
4169                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4170                 if (val & BNX2_NVM_COMMAND_DONE)
4171                         break;
4172         }
4173
4174         if (j >= NVRAM_TIMEOUT_COUNT)
4175                 return -EBUSY;
4176
4177         return 0;
4178 }
4179
4180 static int
4181 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4182 {
4183         u32 cmd;
4184         int j;
4185
4186         /* Build the command word. */
4187         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4188
4189         /* Calculate an offset of a buffered flash, not needed for 5709. */
4190         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4191                 offset = ((offset / bp->flash_info->page_size) <<
4192                            bp->flash_info->page_bits) +
4193                           (offset % bp->flash_info->page_size);
4194         }
4195
4196         /* Need to clear DONE bit separately. */
4197         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4198
4199         /* Address of the NVRAM to read from. */
4200         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4201
4202         /* Issue a read command. */
4203         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4204
4205         /* Wait for completion. */
4206         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4207                 u32 val;
4208
4209                 udelay(5);
4210
4211                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4212                 if (val & BNX2_NVM_COMMAND_DONE) {
4213                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4214                         memcpy(ret_val, &v, 4);
4215                         break;
4216                 }
4217         }
4218         if (j >= NVRAM_TIMEOUT_COUNT)
4219                 return -EBUSY;
4220
4221         return 0;
4222 }
4223
4224
4225 static int
4226 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4227 {
4228         u32 cmd;
4229         __be32 val32;
4230         int j;
4231
4232         /* Build the command word. */
4233         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4234
4235         /* Calculate an offset of a buffered flash, not needed for 5709. */
4236         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4237                 offset = ((offset / bp->flash_info->page_size) <<
4238                           bp->flash_info->page_bits) +
4239                          (offset % bp->flash_info->page_size);
4240         }
4241
4242         /* Need to clear DONE bit separately. */
4243         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4244
4245         memcpy(&val32, val, 4);
4246
4247         /* Write the data. */
4248         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4249
4250         /* Address of the NVRAM to write to. */
4251         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4252
4253         /* Issue the write command. */
4254         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4255
4256         /* Wait for completion. */
4257         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4258                 udelay(5);
4259
4260                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4261                         break;
4262         }
4263         if (j >= NVRAM_TIMEOUT_COUNT)
4264                 return -EBUSY;
4265
4266         return 0;
4267 }
4268
/* Identify the attached flash/EEPROM part, record it in
 * bp->flash_info, and determine the usable flash size.
 *
 * On the 5709 the flash descriptor is fixed (flash_5709) and the
 * table lookup is skipped.  On older chips, NVM_CFG1 is matched
 * against flash_table: bit 30 set means the interface was already
 * reconfigured (match on config1 strapping); otherwise match on the
 * raw strapping and program the winning entry's config registers
 * into the chip under the NVRAM hardware lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping field to compare. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Falling out of either loop without a break means no match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared HW config; fall back to
	 * the table entry's total_size when it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4351
4352 static int
4353 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4354                 int buf_size)
4355 {
4356         int rc = 0;
4357         u32 cmd_flags, offset32, len32, extra;
4358
4359         if (buf_size == 0)
4360                 return 0;
4361
4362         /* Request access to the flash interface. */
4363         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4364                 return rc;
4365
4366         /* Enable access to flash interface */
4367         bnx2_enable_nvram_access(bp);
4368
4369         len32 = buf_size;
4370         offset32 = offset;
4371         extra = 0;
4372
4373         cmd_flags = 0;
4374
4375         if (offset32 & 3) {
4376                 u8 buf[4];
4377                 u32 pre_len;
4378
4379                 offset32 &= ~3;
4380                 pre_len = 4 - (offset & 3);
4381
4382                 if (pre_len >= len32) {
4383                         pre_len = len32;
4384                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4385                                     BNX2_NVM_COMMAND_LAST;
4386                 }
4387                 else {
4388                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4389                 }
4390
4391                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4392
4393                 if (rc)
4394                         return rc;
4395
4396                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4397
4398                 offset32 += 4;
4399                 ret_buf += pre_len;
4400                 len32 -= pre_len;
4401         }
4402         if (len32 & 3) {
4403                 extra = 4 - (len32 & 3);
4404                 len32 = (len32 + 4) & ~3;
4405         }
4406
4407         if (len32 == 4) {
4408                 u8 buf[4];
4409
4410                 if (cmd_flags)
4411                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4412                 else
4413                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4414                                     BNX2_NVM_COMMAND_LAST;
4415
4416                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4417
4418                 memcpy(ret_buf, buf, 4 - extra);
4419         }
4420         else if (len32 > 0) {
4421                 u8 buf[4];
4422
4423                 /* Read the first word. */
4424                 if (cmd_flags)
4425                         cmd_flags = 0;
4426                 else
4427                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4428
4429                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4430
4431                 /* Advance to the next dword. */
4432                 offset32 += 4;
4433                 ret_buf += 4;
4434                 len32 -= 4;
4435
4436                 while (len32 > 4 && rc == 0) {
4437                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4438
4439                         /* Advance to the next dword. */
4440                         offset32 += 4;
4441                         ret_buf += 4;
4442                         len32 -= 4;
4443                 }
4444
4445                 if (rc)
4446                         return rc;
4447
4448                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4449                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4450
4451                 memcpy(ret_buf, buf, 4 - extra);
4452         }
4453
4454         /* Disable access to flash interface */
4455         bnx2_disable_nvram_access(bp);
4456
4457         bnx2_release_nvram_lock(bp);
4458
4459         return rc;
4460 }
4461
4462 static int
4463 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4464                 int buf_size)
4465 {
4466         u32 written, offset32, len32;
4467         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4468         int rc = 0;
4469         int align_start, align_end;
4470
4471         buf = data_buf;
4472         offset32 = offset;
4473         len32 = buf_size;
4474         align_start = align_end = 0;
4475
4476         if ((align_start = (offset32 & 3))) {
4477                 offset32 &= ~3;
4478                 len32 += align_start;
4479                 if (len32 < 4)
4480                         len32 = 4;
4481                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4482                         return rc;
4483         }
4484
4485         if (len32 & 3) {
4486                 align_end = 4 - (len32 & 3);
4487                 len32 += align_end;
4488                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4489                         return rc;
4490         }
4491
4492         if (align_start || align_end) {
4493                 align_buf = kmalloc(len32, GFP_KERNEL);
4494                 if (align_buf == NULL)
4495                         return -ENOMEM;
4496                 if (align_start) {
4497                         memcpy(align_buf, start, 4);
4498                 }
4499                 if (align_end) {
4500                         memcpy(align_buf + len32 - 4, end, 4);
4501                 }
4502                 memcpy(align_buf + align_start, data_buf, buf_size);
4503                 buf = align_buf;
4504         }
4505
4506         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4507                 flash_buffer = kmalloc(264, GFP_KERNEL);
4508                 if (flash_buffer == NULL) {
4509                         rc = -ENOMEM;
4510                         goto nvram_write_end;
4511                 }
4512         }
4513
4514         written = 0;
4515         while ((written < len32) && (rc == 0)) {
4516                 u32 page_start, page_end, data_start, data_end;
4517                 u32 addr, cmd_flags;
4518                 int i;
4519
4520                 /* Find the page_start addr */
4521                 page_start = offset32 + written;
4522                 page_start -= (page_start % bp->flash_info->page_size);
4523                 /* Find the page_end addr */
4524                 page_end = page_start + bp->flash_info->page_size;
4525                 /* Find the data_start addr */
4526                 data_start = (written == 0) ? offset32 : page_start;
4527                 /* Find the data_end addr */
4528                 data_end = (page_end > offset32 + len32) ?
4529                         (offset32 + len32) : page_end;
4530
4531                 /* Request access to the flash interface. */
4532                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4533                         goto nvram_write_end;
4534
4535                 /* Enable access to flash interface */
4536                 bnx2_enable_nvram_access(bp);
4537
4538                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4539                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4540                         int j;
4541
4542                         /* Read the whole page into the buffer
4543                          * (non-buffer flash only) */
4544                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4545                                 if (j == (bp->flash_info->page_size - 4)) {
4546                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4547                                 }
4548                                 rc = bnx2_nvram_read_dword(bp,
4549                                         page_start + j,
4550                                         &flash_buffer[j],
4551                                         cmd_flags);
4552
4553                                 if (rc)
4554                                         goto nvram_write_end;
4555
4556                                 cmd_flags = 0;
4557                         }
4558                 }
4559
4560                 /* Enable writes to flash interface (unlock write-protect) */
4561                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4562                         goto nvram_write_end;
4563
4564                 /* Loop to write back the buffer data from page_start to
4565                  * data_start */
4566                 i = 0;
4567                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4568                         /* Erase the page */
4569                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4570                                 goto nvram_write_end;
4571
4572                         /* Re-enable the write again for the actual write */
4573                         bnx2_enable_nvram_write(bp);
4574
4575                         for (addr = page_start; addr < data_start;
4576                                 addr += 4, i += 4) {
4577
4578                                 rc = bnx2_nvram_write_dword(bp, addr,
4579                                         &flash_buffer[i], cmd_flags);
4580
4581                                 if (rc != 0)
4582                                         goto nvram_write_end;
4583
4584                                 cmd_flags = 0;
4585                         }
4586                 }
4587
4588                 /* Loop to write the new data from data_start to data_end */
4589                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4590                         if ((addr == page_end - 4) ||
4591                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4592                                  (addr == data_end - 4))) {
4593
4594                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4595                         }
4596                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4597                                 cmd_flags);
4598
4599                         if (rc != 0)
4600                                 goto nvram_write_end;
4601
4602                         cmd_flags = 0;
4603                         buf += 4;
4604                 }
4605
4606                 /* Loop to write back the buffer data from data_end
4607                  * to page_end */
4608                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4609                         for (addr = data_end; addr < page_end;
4610                                 addr += 4, i += 4) {
4611
4612                                 if (addr == page_end-4) {
4613                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4614                                 }
4615                                 rc = bnx2_nvram_write_dword(bp, addr,
4616                                         &flash_buffer[i], cmd_flags);
4617
4618                                 if (rc != 0)
4619                                         goto nvram_write_end;
4620
4621                                 cmd_flags = 0;
4622                         }
4623                 }
4624
4625                 /* Disable writes to flash interface (lock write-protect) */
4626                 bnx2_disable_nvram_write(bp);
4627
4628                 /* Disable access to flash interface */
4629                 bnx2_disable_nvram_access(bp);
4630                 bnx2_release_nvram_lock(bp);
4631
4632                 /* Increment written */
4633                 written += data_end - data_start;
4634         }
4635
4636 nvram_write_end:
4637         kfree(flash_buffer);
4638         kfree(align_buf);
4639         return rc;
4640 }
4641
4642 static void
4643 bnx2_init_fw_cap(struct bnx2 *bp)
4644 {
4645         u32 val, sig = 0;
4646
4647         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4648         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4649
4650         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4651                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4652
4653         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4654         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4655                 return;
4656
4657         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4658                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4659                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4660         }
4661
4662         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4663             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4664                 u32 link;
4665
4666                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4667
4668                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4669                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4670                         bp->phy_port = PORT_FIBRE;
4671                 else
4672                         bp->phy_port = PORT_TP;
4673
4674                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4675                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4676         }
4677
4678         if (netif_running(bp->dev) && sig)
4679                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4680 }
4681
4682 static void
4683 bnx2_setup_msix_tbl(struct bnx2 *bp)
4684 {
4685         REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4686
4687         REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4688         REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4689 }
4690
/* Issue a chip/core reset and bring the device back to a known state.
 *
 * reset_code is the reset reason OR'd into the WAIT0/WAIT1 messages
 * sent to the bootcode via bnx2_fw_sync() before and after the reset.
 * Uses the chip-specific reset method (MISC_COMMAND on the 5709,
 * PCICFG_MISC_CONFIG core-reset on 5706/5708), verifies endian
 * configuration, re-reads firmware capabilities, and reapplies
 * errata workarounds (5706 A0 voltage/rbuf, MSI-X GRC timeout).
 *
 * Returns 0 on success or a negative errno (-EBUSY on reset timeout,
 * -ENODEV on bad endian diag, or a bnx2_fw_sync()/
 * bnx2_alloc_bad_rbuf() error).
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via the MISC command register, then
		 * restore the register-window/word-swap config bits. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reapply the default link setup. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4800
4801 static int
4802 bnx2_init_chip(struct bnx2 *bp)
4803 {
4804         u32 val, mtu;
4805         int rc, i;
4806
4807         /* Make sure the interrupt is not active. */
4808         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4809
4810         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4811               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4812 #ifdef __BIG_ENDIAN
4813               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4814 #endif
4815               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4816               DMA_READ_CHANS << 12 |
4817               DMA_WRITE_CHANS << 16;
4818
4819         val |= (0x2 << 20) | (1 << 11);
4820
4821         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4822                 val |= (1 << 23);
4823
4824         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4825             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4826                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4827
4828         REG_WR(bp, BNX2_DMA_CONFIG, val);
4829
4830         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4831                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4832                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4833                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4834         }
4835
4836         if (bp->flags & BNX2_FLAG_PCIX) {
4837                 u16 val16;
4838
4839                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4840                                      &val16);
4841                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4842                                       val16 & ~PCI_X_CMD_ERO);
4843         }
4844
4845         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4846                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4847                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4848                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4849
4850         /* Initialize context mapping and zero out the quick contexts.  The
4851          * context block must have already been enabled. */
4852         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4853                 rc = bnx2_init_5709_context(bp);
4854                 if (rc)
4855                         return rc;
4856         } else
4857                 bnx2_init_context(bp);
4858
4859         if ((rc = bnx2_init_cpus(bp)) != 0)
4860                 return rc;
4861
4862         bnx2_init_nvram(bp);
4863
4864         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4865
4866         val = REG_RD(bp, BNX2_MQ_CONFIG);
4867         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4868         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4869         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4870                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4871                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4872                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4873         }
4874
4875         REG_WR(bp, BNX2_MQ_CONFIG, val);
4876
4877         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4878         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4879         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4880
4881         val = (BCM_PAGE_BITS - 8) << 24;
4882         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4883
4884         /* Configure page size. */
4885         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4886         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4887         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4888         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4889
4890         val = bp->mac_addr[0] +
4891               (bp->mac_addr[1] << 8) +
4892               (bp->mac_addr[2] << 16) +
4893               bp->mac_addr[3] +
4894               (bp->mac_addr[4] << 8) +
4895               (bp->mac_addr[5] << 16);
4896         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4897
4898         /* Program the MTU.  Also include 4 bytes for CRC32. */
4899         mtu = bp->dev->mtu;
4900         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4901         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4902                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4903         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4904
4905         if (mtu < 1500)
4906                 mtu = 1500;
4907
4908         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4909         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4910         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4911
4912         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4913         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4914                 bp->bnx2_napi[i].last_status_idx = 0;
4915
4916         bp->idle_chk_status_idx = 0xffff;
4917
4918         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4919
4920         /* Set up how to generate a link change interrupt. */
4921         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4922
4923         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4924                (u64) bp->status_blk_mapping & 0xffffffff);
4925         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4926
4927         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4928                (u64) bp->stats_blk_mapping & 0xffffffff);
4929         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4930                (u64) bp->stats_blk_mapping >> 32);
4931
4932         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4933                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4934
4935         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4936                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4937
4938         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4939                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4940
4941         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4942
4943         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4944
4945         REG_WR(bp, BNX2_HC_COM_TICKS,
4946                (bp->com_ticks_int << 16) | bp->com_ticks);
4947
4948         REG_WR(bp, BNX2_HC_CMD_TICKS,
4949                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4950
4951         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4952                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4953         else
4954                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4955         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4956
4957         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4958                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4959         else {
4960                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4961                       BNX2_HC_CONFIG_COLLECT_STATS;
4962         }
4963
4964         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4965                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4966                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4967
4968                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4969         }
4970
4971         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4972                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4973
4974         REG_WR(bp, BNX2_HC_CONFIG, val);
4975
4976         for (i = 1; i < bp->irq_nvecs; i++) {
4977                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4978                            BNX2_HC_SB_CONFIG_1;
4979
4980                 REG_WR(bp, base,
4981                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4982                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4983                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4984
4985                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4986                         (bp->tx_quick_cons_trip_int << 16) |
4987                          bp->tx_quick_cons_trip);
4988
4989                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4990                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4991
4992                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4993                        (bp->rx_quick_cons_trip_int << 16) |
4994                         bp->rx_quick_cons_trip);
4995
4996                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4997                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
4998         }
4999
5000         /* Clear internal stats counters. */
5001         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5002
5003         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5004
5005         /* Initialize the receive filter. */
5006         bnx2_set_rx_mode(bp->dev);
5007
5008         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5009                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5010                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5011                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5012         }
5013         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5014                           1, 0);
5015
5016         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5017         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5018
5019         udelay(20);
5020
5021         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5022
5023         return rc;
5024 }
5025
5026 static void
5027 bnx2_clear_ring_states(struct bnx2 *bp)
5028 {
5029         struct bnx2_napi *bnapi;
5030         struct bnx2_tx_ring_info *txr;
5031         struct bnx2_rx_ring_info *rxr;
5032         int i;
5033
5034         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5035                 bnapi = &bp->bnx2_napi[i];
5036                 txr = &bnapi->tx_ring;
5037                 rxr = &bnapi->rx_ring;
5038
5039                 txr->tx_cons = 0;
5040                 txr->hw_tx_cons = 0;
5041                 rxr->rx_prod_bseq = 0;
5042                 rxr->rx_prod = 0;
5043                 rxr->rx_cons = 0;
5044                 rxr->rx_pg_prod = 0;
5045                 rxr->rx_pg_cons = 0;
5046         }
5047 }
5048
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	/* Program the on-chip L2 TX context for this connection ID.
	 * The 5709 uses a different context layout, so the _XI register
	 * offsets are selected for it; older chips use the legacy ones. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	/* Mark the context as an L2 connection of the standard size. */
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* NOTE(review): the (8 << 16) field in the command-type word is a
	 * hardware parameter whose meaning is not visible here -- confirm
	 * against the chip documentation before changing. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* Give the chip the DMA address of the TX descriptor ring,
	 * high half then low half. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5078
5079 static void
5080 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5081 {
5082         struct tx_bd *txbd;
5083         u32 cid = TX_CID;
5084         struct bnx2_napi *bnapi;
5085         struct bnx2_tx_ring_info *txr;
5086
5087         bnapi = &bp->bnx2_napi[ring_num];
5088         txr = &bnapi->tx_ring;
5089
5090         if (ring_num == 0)
5091                 cid = TX_CID;
5092         else
5093                 cid = TX_TSS_CID + ring_num - 1;
5094
5095         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5096
5097         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5098
5099         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5100         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5101
5102         txr->tx_prod = 0;
5103         txr->tx_prod_bseq = 0;
5104
5105         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5106         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5107
5108         bnx2_init_tx_context(bp, cid, txr);
5109 }
5110
5111 static void
5112 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5113                      int num_rings)
5114 {
5115         int i;
5116         struct rx_bd *rxbd;
5117
5118         for (i = 0; i < num_rings; i++) {
5119                 int j;
5120
5121                 rxbd = &rx_ring[i][0];
5122                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5123                         rxbd->rx_bd_len = buf_size;
5124                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5125                 }
5126                 if (i == (num_rings - 1))
5127                         j = 0;
5128                 else
5129                         j = i + 1;
5130                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5131                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5132         }
5133 }
5134
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; RSS rings get consecutive CIDs
	 * starting at RX_RSS_CID. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the RX BD pages into a circular list with each BD sized
	 * for a full receive buffer. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Disable the page (jumbo) ring by default; re-enable below if
	 * the paged RX scheme is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Paged RX: set up the page descriptor ring as well. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Point the chip at the first page-BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Point the chip at the first RX BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring.  A partial fill is tolerated with a
	 * warning rather than treated as fatal. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the skb ring, likewise best-effort. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used to publish producer indices/bseq. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5220
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* With multiple TX rings, re-enable TSS with the ring count and
	 * the base TSS CID. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Likewise disable RSS while the RX rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the RSS indirection table, distributing entries
		 * round-robin over the non-default RX rings.  Entries are
		 * packed four per 32-bit word and flushed big-endian each
		 * time a word completes (i % 4 == 3).
		 * NOTE(review): the write offset uses i, the index of the
		 * word's LAST byte, rather than a word-aligned offset --
		 * confirm this matches the scratch table's addressing. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Enable RSS hashing for all IPv4/IPv6 traffic types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5265
5266 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5267 {
5268         u32 max, num_rings = 1;
5269
5270         while (ring_size > MAX_RX_DESC_CNT) {
5271                 ring_size -= MAX_RX_DESC_CNT;
5272                 num_rings++;
5273         }
5274         /* round to next power of 2 */
5275         max = max_size;
5276         while ((max & num_rings) == 0)
5277                 max >>= 1;
5278
5279         if (num_rings != max)
5280                 max <<= 1;
5281
5282         return max;
5283 }
5284
5285 static void
5286 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5287 {
5288         u32 rx_size, rx_space, jumbo_size;
5289
5290         /* 8 for CRC and VLAN */
5291         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5292
5293         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5294                 sizeof(struct skb_shared_info);
5295
5296         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5297         bp->rx_pg_ring_size = 0;
5298         bp->rx_max_pg_ring = 0;
5299         bp->rx_max_pg_ring_idx = 0;
5300         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5301                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5302
5303                 jumbo_size = size * pages;
5304                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5305                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5306
5307                 bp->rx_pg_ring_size = jumbo_size;
5308                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5309                                                         MAX_RX_PG_RINGS);
5310                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5311                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5312                 bp->rx_copy_thresh = 0;
5313         }
5314
5315         bp->rx_buf_use_size = rx_size;
5316         /* hw alignment */
5317         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5318         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5319         bp->rx_ring_size = size;
5320         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5321         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5322 }
5323
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	/* Free every skb still queued on any TX ring, unmapping the
	 * head and all fragment DMA mappings first.  Used when tearing
	 * down or resetting the rings. */
	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring never allocated -- nothing to free. */
		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the loop body: by one for the head BD
		 * and one more per fragment BD following it. */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Unmap the linear (head) part of the packet. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Unmap each page fragment of this packet; the
			 * fragment BDs wrap via TX_RING_IDX(). */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5367
5368 static void
5369 bnx2_free_rx_skbs(struct bnx2 *bp)
5370 {
5371         int i;
5372
5373         for (i = 0; i < bp->num_rx_rings; i++) {
5374                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5375                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5376                 int j;
5377
5378                 if (rxr->rx_buf_ring == NULL)
5379                         return;
5380
5381                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5382                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5383                         struct sk_buff *skb = rx_buf->skb;
5384
5385                         if (skb == NULL)
5386                                 continue;
5387
5388                         dma_unmap_single(&bp->pdev->dev,
5389                                          dma_unmap_addr(rx_buf, mapping),
5390                                          bp->rx_buf_use_size,
5391                                          PCI_DMA_FROMDEVICE);
5392
5393                         rx_buf->skb = NULL;
5394
5395                         dev_kfree_skb(skb);
5396                 }
5397                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5398                         bnx2_free_rx_page(bp, rxr, j);
5399         }
5400 }
5401
/* Release all driver-held TX and RX buffers (ring teardown/reset). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5408
5409 static int
5410 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5411 {
5412         int rc;
5413
5414         rc = bnx2_reset_chip(bp, reset_code);
5415         bnx2_free_skbs(bp);
5416         if (rc)
5417                 return rc;
5418
5419         if ((rc = bnx2_init_chip(bp)) != 0)
5420                 return rc;
5421
5422         bnx2_init_all_rings(bp);
5423         return 0;
5424 }
5425
5426 static int
5427 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5428 {
5429         int rc;
5430
5431         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5432                 return rc;
5433
5434         spin_lock_bh(&bp->phy_lock);
5435         bnx2_init_phy(bp, reset_phy);
5436         bnx2_set_link(bp);
5437         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5438                 bnx2_remote_phy_event(bp);
5439         spin_unlock_bh(&bp->phy_lock);
5440         return 0;
5441 }
5442
5443 static int
5444 bnx2_shutdown_chip(struct bnx2 *bp)
5445 {
5446         u32 reset_code;
5447
5448         if (bp->flags & BNX2_FLAG_NO_WOL)
5449                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5450         else if (bp->wol)
5451                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5452         else
5453                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5454
5455         return bnx2_reset_chip(bp, reset_code);
5456 }
5457
/* Self-test: verify that each register's read/write bits toggle and its
 * read-only bits hold their value.  Returns 0 on success, -ENODEV on
 * the first mismatch (the register's original value is restored either
 * way). */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Per-register test descriptor: rw_mask bits must be writable,
	 * ro_mask bits must be unchanged by writes.  Entries flagged
	 * BNX2_FL_NOT_5709 are skipped on the 5709. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: rw bits must read back 0, ro bits must
		 * keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back 1, ro bits must
		 * still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore and move on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure before bailing. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5628
5629 static int
5630 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5631 {
5632         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5633                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5634         int i;
5635
5636         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5637                 u32 offset;
5638
5639                 for (offset = 0; offset < size; offset += 4) {
5640
5641                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5642
5643                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5644                                 test_pattern[i]) {
5645                                 return -ENODEV;
5646                         }
5647                 }
5648         }
5649         return 0;
5650 }
5651
5652 static int
5653 bnx2_test_memory(struct bnx2 *bp)
5654 {
5655         int ret = 0;
5656         int i;
5657         static struct mem_entry {
5658                 u32   offset;
5659                 u32   len;
5660         } mem_tbl_5706[] = {
5661                 { 0x60000,  0x4000 },
5662                 { 0xa0000,  0x3000 },
5663                 { 0xe0000,  0x4000 },
5664                 { 0x120000, 0x4000 },
5665                 { 0x1a0000, 0x4000 },
5666                 { 0x160000, 0x4000 },
5667                 { 0xffffffff, 0    },
5668         },
5669         mem_tbl_5709[] = {
5670                 { 0x60000,  0x4000 },
5671                 { 0xa0000,  0x3000 },
5672                 { 0xe0000,  0x4000 },
5673                 { 0x120000, 0x4000 },
5674                 { 0x1a0000, 0x4000 },
5675                 { 0xffffffff, 0    },
5676         };
5677         struct mem_entry *mem_tbl;
5678
5679         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5680                 mem_tbl = mem_tbl_5709;
5681         else
5682                 mem_tbl = mem_tbl_5706;
5683
5684         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5685                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5686                         mem_tbl[i].len)) != 0) {
5687                         return ret;
5688                 }
5689         }
5690
5691         return ret;
5692 }
5693
5694 #define BNX2_MAC_LOOPBACK       0
5695 #define BNX2_PHY_LOOPBACK       1
5696
5697 static int
5698 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5699 {
5700         unsigned int pkt_size, num_pkts, i;
5701         struct sk_buff *skb, *rx_skb;
5702         unsigned char *packet;
5703         u16 rx_start_idx, rx_idx;
5704         dma_addr_t map;
5705         struct tx_bd *txbd;
5706         struct sw_bd *rx_buf;
5707         struct l2_fhdr *rx_hdr;
5708         int ret = -ENODEV;
5709         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5710         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5711         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5712
5713         tx_napi = bnapi;
5714
5715         txr = &tx_napi->tx_ring;
5716         rxr = &bnapi->rx_ring;
5717         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5718                 bp->loopback = MAC_LOOPBACK;
5719                 bnx2_set_mac_loopback(bp);
5720         }
5721         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5722                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5723                         return 0;
5724
5725                 bp->loopback = PHY_LOOPBACK;
5726                 bnx2_set_phy_loopback(bp);
5727         }
5728         else
5729                 return -EINVAL;
5730
5731         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5732         skb = netdev_alloc_skb(bp->dev, pkt_size);
5733         if (!skb)
5734                 return -ENOMEM;
5735         packet = skb_put(skb, pkt_size);
5736         memcpy(packet, bp->dev->dev_addr, 6);
5737         memset(packet + 6, 0x0, 8);
5738         for (i = 14; i < pkt_size; i++)
5739                 packet[i] = (unsigned char) (i & 0xff);
5740
5741         map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5742                              PCI_DMA_TODEVICE);
5743         if (dma_mapping_error(&bp->pdev->dev, map)) {
5744                 dev_kfree_skb(skb);
5745                 return -EIO;
5746         }
5747
5748         REG_WR(bp, BNX2_HC_COMMAND,
5749                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5750
5751         REG_RD(bp, BNX2_HC_COMMAND);
5752
5753         udelay(5);
5754         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5755
5756         num_pkts = 0;
5757
5758         txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5759
5760         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5761         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5762         txbd->tx_bd_mss_nbytes = pkt_size;
5763         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5764
5765         num_pkts++;
5766         txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5767         txr->tx_prod_bseq += pkt_size;
5768
5769         REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5770         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5771
5772         udelay(100);
5773
5774         REG_WR(bp, BNX2_HC_COMMAND,
5775                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5776
5777         REG_RD(bp, BNX2_HC_COMMAND);
5778
5779         udelay(5);
5780
5781         dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5782         dev_kfree_skb(skb);
5783
5784         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5785                 goto loopback_test_done;
5786
5787         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5788         if (rx_idx != rx_start_idx + num_pkts) {
5789                 goto loopback_test_done;
5790         }
5791
5792         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5793         rx_skb = rx_buf->skb;
5794
5795         rx_hdr = rx_buf->desc;
5796         skb_reserve(rx_skb, BNX2_RX_OFFSET);
5797
5798         dma_sync_single_for_cpu(&bp->pdev->dev,
5799                 dma_unmap_addr(rx_buf, mapping),
5800                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5801
5802         if (rx_hdr->l2_fhdr_status &
5803                 (L2_FHDR_ERRORS_BAD_CRC |
5804                 L2_FHDR_ERRORS_PHY_DECODE |
5805                 L2_FHDR_ERRORS_ALIGNMENT |
5806                 L2_FHDR_ERRORS_TOO_SHORT |
5807                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5808
5809                 goto loopback_test_done;
5810         }
5811
5812         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5813                 goto loopback_test_done;
5814         }
5815
5816         for (i = 14; i < pkt_size; i++) {
5817                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5818                         goto loopback_test_done;
5819                 }
5820         }
5821
5822         ret = 0;
5823
5824 loopback_test_done:
5825         bp->loopback = 0;
5826         return ret;
5827 }
5828
5829 #define BNX2_MAC_LOOPBACK_FAILED        1
5830 #define BNX2_PHY_LOOPBACK_FAILED        2
5831 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5832                                          BNX2_PHY_LOOPBACK_FAILED)
5833
5834 static int
5835 bnx2_test_loopback(struct bnx2 *bp)
5836 {
5837         int rc = 0;
5838
5839         if (!netif_running(bp->dev))
5840                 return BNX2_LOOPBACK_FAILED;
5841
5842         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5843         spin_lock_bh(&bp->phy_lock);
5844         bnx2_init_phy(bp, 1);
5845         spin_unlock_bh(&bp->phy_lock);
5846         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5847                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5848         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5849                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5850         return rc;
5851 }
5852
5853 #define NVRAM_SIZE 0x200
5854 #define CRC32_RESIDUAL 0xdebb20e3
5855
5856 static int
5857 bnx2_test_nvram(struct bnx2 *bp)
5858 {
5859         __be32 buf[NVRAM_SIZE / 4];
5860         u8 *data = (u8 *) buf;
5861         int rc = 0;
5862         u32 magic, csum;
5863
5864         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5865                 goto test_nvram_done;
5866
5867         magic = be32_to_cpu(buf[0]);
5868         if (magic != 0x669955aa) {
5869                 rc = -ENODEV;
5870                 goto test_nvram_done;
5871         }
5872
5873         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5874                 goto test_nvram_done;
5875
5876         csum = ether_crc_le(0x100, data);
5877         if (csum != CRC32_RESIDUAL) {
5878                 rc = -ENODEV;
5879                 goto test_nvram_done;
5880         }
5881
5882         csum = ether_crc_le(0x100, data + 0x100);
5883         if (csum != CRC32_RESIDUAL) {
5884                 rc = -ENODEV;
5885         }
5886
5887 test_nvram_done:
5888         return rc;
5889 }
5890
5891 static int
5892 bnx2_test_link(struct bnx2 *bp)
5893 {
5894         u32 bmsr;
5895
5896         if (!netif_running(bp->dev))
5897                 return -ENODEV;
5898
5899         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5900                 if (bp->link_up)
5901                         return 0;
5902                 return -ENODEV;
5903         }
5904         spin_lock_bh(&bp->phy_lock);
5905         bnx2_enable_bmsr1(bp);
5906         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5907         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5908         bnx2_disable_bmsr1(bp);
5909         spin_unlock_bh(&bp->phy_lock);
5910
5911         if (bmsr & BMSR_LSTATUS) {
5912                 return 0;
5913         }
5914         return -ENODEV;
5915 }
5916
5917 static int
5918 bnx2_test_intr(struct bnx2 *bp)
5919 {
5920         int i;
5921         u16 status_idx;
5922
5923         if (!netif_running(bp->dev))
5924                 return -ENODEV;
5925
5926         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5927
5928         /* This register is not touched during run-time. */
5929         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5930         REG_RD(bp, BNX2_HC_COMMAND);
5931
5932         for (i = 0; i < 10; i++) {
5933                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5934                         status_idx) {
5935
5936                         break;
5937                 }
5938
5939                 msleep_interruptible(10);
5940         }
5941         if (i < 10)
5942                 return 0;
5943
5944         return -ENODEV;
5945 }
5946
/* Determining link for parallel detection on a 5706 SerDes PHY.
 *
 * Returns 1 when a link partner appears present (signal detected,
 * autoneg debug clean, and not receiving CONFIG words), 0 otherwise.
 * Returns 0 immediately when parallel detection is disabled via
 * BNX2_PHY_FLAG_NO_PARALLEL.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it back to
	 * check the signal-detect bit. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* AN_DBG is read twice; NOTE(review): presumably the first read
	 * clears latched bits so the second shows current state —
	 * confirm against the PHY documentation before changing. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* No sync or invalid RUDI means the link is not usable. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5978
5979 static void
5980 bnx2_5706_serdes_timer(struct bnx2 *bp)
5981 {
5982         int check_link = 1;
5983
5984         spin_lock(&bp->phy_lock);
5985         if (bp->serdes_an_pending) {
5986                 bp->serdes_an_pending--;
5987                 check_link = 0;
5988         } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5989                 u32 bmcr;
5990
5991                 bp->current_interval = BNX2_TIMER_INTERVAL;
5992
5993                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5994
5995                 if (bmcr & BMCR_ANENABLE) {
5996                         if (bnx2_5706_serdes_has_link(bp)) {
5997                                 bmcr &= ~BMCR_ANENABLE;
5998                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5999                                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6000                                 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6001                         }
6002                 }
6003         }
6004         else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6005                  (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6006                 u32 phy2;
6007
6008                 bnx2_write_phy(bp, 0x17, 0x0f01);
6009                 bnx2_read_phy(bp, 0x15, &phy2);
6010                 if (phy2 & 0x20) {
6011                         u32 bmcr;
6012
6013                         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6014                         bmcr |= BMCR_ANENABLE;
6015                         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6016
6017                         bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6018                 }
6019         } else
6020                 bp->current_interval = BNX2_TIMER_INTERVAL;
6021
6022         if (check_link) {
6023                 u32 val;
6024
6025                 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6026                 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6027                 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6028
6029                 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6030                         if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6031                                 bnx2_5706s_force_link_dn(bp, 1);
6032       &