Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/checksum.h>
43 #include <linux/workqueue.h>
44 #include <linux/crc32.h>
45 #include <linux/prefetch.h>
46 #include <linux/cache.h>
47 #include <linux/firmware.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50
51 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
52 #define BCM_CNIC 1
53 #include "cnic_if.h"
54 #endif
55 #include "bnx2.h"
56 #include "bnx2_fw.h"
57
58 #define DRV_MODULE_NAME         "bnx2"
59 #define DRV_MODULE_VERSION      "2.0.21"
60 #define DRV_MODULE_RELDATE      "Dec 23, 2010"
61 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.1.fw"
62 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
63 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1.fw"
64 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"
66
67 #define RUN_AT(x) (jiffies + (x))
68
69 /* Time in jiffies before concluding the transmitter is hung. */
70 #define TX_TIMEOUT  (5*HZ)
71
/* One-line banner printed when the driver loads. */
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
89
/* Board variants supported by this driver.  The enumerator values are
 * used as indices into board_info[] below and as the driver_data field
 * of bnx2_pci_tbl, so the three must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
103
/* Human-readable board names, indexed by board_t above — keep the two
 * lists in lockstep.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
120
/* PCI IDs handled by this driver.  The HP NC370x entries match on
 * subsystem vendor/device and must precede the generic PCI_ANY_ID
 * entries for the same chip so they are matched first.  The last
 * field (driver_data) is a board_t index into board_info[].
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: BCM5716/5716S — no PCI_DEVICE_ID_NX2_* macro. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
146
/* NVRAM device table for pre-5709 chips.  Each entry describes one
 * supported flash/EEPROM part; field order follows struct flash_spec
 * (see bnx2.h).  NOTE(review): the first five hex words appear to be
 * strapping-match and controller-config values matched against the
 * chip's NVRAM strapping at init time — confirm against the nvram
 * init code before relying on individual values.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
235
/* The 5709 family has a single fixed NVRAM configuration, so it gets
 * one dedicated flash_spec instead of an entry in flash_table[].
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
244
245 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
246
247 static void bnx2_init_napi(struct bnx2 *bp);
248 static void bnx2_del_napi(struct bnx2 *bp);
249
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
255         barrier();
256
257         /* The ring uses 256 indices for 255 entries, one of them
258          * needs to be skipped.
259          */
260         diff = txr->tx_prod - txr->tx_cons;
261         if (unlikely(diff >= TX_DESC_CNT)) {
262                 diff &= 0xffff;
263                 if (diff == TX_DESC_CNT)
264                         diff = MAX_TX_DESC_CNT;
265         }
266         return bp->tx_ring_size - diff;
267 }
268
/* Indirect register read: write the target offset into the PCICFG
 * window-address register, then read the value through the window.
 * indirect_lock serializes the two-step sequence against other users
 * of the shared window registers.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
280
/* Indirect register write: address then data through the PCICFG
 * window, under indirect_lock (see bnx2_reg_rd_ind).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
289
/* Write a 32-bit word at @offset into the firmware shared-memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
295
/* Read a 32-bit word at @offset from the firmware shared-memory region. */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
301
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * 5709-family chips use a data/control register pair and the write
 * request must be polled until the chip clears it; older chips take a
 * plain address/data write.  indirect_lock serializes use of the
 * shared context registers.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll up to 5 times (5us apart) for completion; a
		 * failure is silently ignored.
		 */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
325
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329 {
330         struct bnx2 *bp = netdev_priv(dev);
331         struct drv_ctl_io *io = &info->data.io;
332
333         switch (info->cmd) {
334         case DRV_CTL_IO_WR_CMD:
335                 bnx2_reg_wr_ind(bp, io->offset, io->data);
336                 break;
337         case DRV_CTL_IO_RD_CMD:
338                 io->data = bnx2_reg_rd_ind(bp, io->offset);
339                 break;
340         case DRV_CTL_CTX_WR_CMD:
341                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342                 break;
343         default:
344                 return -EINVAL;
345         }
346         return 0;
347 }
348
/* Fill in the IRQ/status-block information handed to the CNIC driver.
 *
 * With MSI-X the CNIC gets the vector at index bp->irq_nvecs and its
 * own MSI-X status block; otherwise it shares vector 0 and is polled
 * via bnapi->cnic_present/cnic_tag from the net driver's NAPI path.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out contiguously at MSIX alignment. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
375
/* Register a CNIC driver with this device.
 *
 * Publishes @ops via rcu_assign_pointer so readers in the fast path
 * see fully-initialized data (bp->cnic_data is stored first).
 * Returns -EINVAL for a NULL ops, -EBUSY if already registered.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
398
/* Unregister the CNIC driver.
 *
 * Clears the ops pointer under cnic_lock and then waits for all RCU
 * readers (the fast-path users of bp->cnic_ops) to finish before
 * returning, so the caller may free the ops afterwards.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
413
/* Entry point called by the CNIC driver to discover this device.
 * Returns the per-device cnic_eth_dev populated with the chip info
 * and the driver callbacks the CNIC uses (drv_ctl, register,
 * unregister).
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
430
/* Tell a registered CNIC driver (if any) to stop.  cnic_lock protects
 * the ops pointer for the duration of the callback.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
446
/* Tell a registered CNIC driver (if any) to start.  In non-MSI-X mode
 * cnic_tag is resynced to the current status index before the callback
 * so the shared-vector polling starts from a consistent point.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
467
468 #else
469
/* No-op stub when CNIC support is not compiled in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
474
/* No-op stub when CNIC support is not compiled in. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
479
480 #endif
481
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * If hardware auto-polling of the PHY is enabled, it is turned off for
 * the duration of the transaction and restored afterwards (the dummy
 * REG_RD after each mode write flushes the posted write before the
 * 40us settle delay).
 *
 * Returns 0 on success; -EBUSY with *val zeroed if the transaction is
 * still busy after the ~500us poll window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the read transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 x 10us. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
538
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy: auto-polling is suspended around the
 * transaction if enabled, and completion is polled for up to ~500us.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the write transaction (data in the low bits). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 x 10us. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
587
/* Mask interrupts on every vector.  The final read flushes the posted
 * writes so the masking has taken effect when this returns.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
601
/* Re-enable interrupts on every vector.
 *
 * Each vector gets two writes: the first acknowledges the last seen
 * status index while still masked, the second unmasks.  The trailing
 * HC_COMMAND write with COAL_NOW forces the host coalescing block to
 * generate an interrupt so no events are missed across the window.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
622
/* Disable interrupts and wait for in-flight handlers to finish.
 *
 * intr_sem is raised first so any handler that does run will see the
 * device as quiesced; bnx2_netif_start() later decrements it.  If the
 * device is not running there is nothing to synchronize against.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
636
637 static void
638 bnx2_napi_disable(struct bnx2 *bp)
639 {
640         int i;
641
642         for (i = 0; i < bp->irq_nvecs; i++)
643                 napi_disable(&bp->bnx2_napi[i].napi);
644 }
645
646 static void
647 bnx2_napi_enable(struct bnx2 *bp)
648 {
649         int i;
650
651         for (i = 0; i < bp->irq_nvecs; i++)
652                 napi_enable(&bp->bnx2_napi[i].napi);
653 }
654
/* Quiesce the interface: optionally stop the CNIC first, then NAPI
 * and the tx queues, then interrupts.  The ordering matters — NAPI is
 * stopped before interrupts are synchronized so no new polls start.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
667
/* Undo bnx2_netif_stop().
 *
 * intr_sem counts nested stops; only the final start (when the count
 * drops to zero) actually re-enables the tx queues, carrier, NAPI,
 * interrupts and (optionally) the CNIC.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			/* phy_lock guards link_up vs. the link-state paths. */
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
685
686 static void
687 bnx2_free_tx_mem(struct bnx2 *bp)
688 {
689         int i;
690
691         for (i = 0; i < bp->num_tx_rings; i++) {
692                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
693                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
694
695                 if (txr->tx_desc_ring) {
696                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
697                                           txr->tx_desc_ring,
698                                           txr->tx_desc_mapping);
699                         txr->tx_desc_ring = NULL;
700                 }
701                 kfree(txr->tx_buf_ring);
702                 txr->tx_buf_ring = NULL;
703         }
704 }
705
/* Release the descriptor rings, page rings and shadow buffers of every
 * rx ring.  Safe on partially-allocated state: NULL entries are
 * skipped and pointers cleared after freeing.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		/* Shadow buffer ring is vmalloc'ed (can be large). */
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
737
738 static int
739 bnx2_alloc_tx_mem(struct bnx2 *bp)
740 {
741         int i;
742
743         for (i = 0; i < bp->num_tx_rings; i++) {
744                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
745                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
746
747                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
748                 if (txr->tx_buf_ring == NULL)
749                         return -ENOMEM;
750
751                 txr->tx_desc_ring =
752                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
753                                            &txr->tx_desc_mapping, GFP_KERNEL);
754                 if (txr->tx_desc_ring == NULL)
755                         return -ENOMEM;
756         }
757         return 0;
758 }
759
/* Allocate the shadow buffers, DMA descriptor rings and (if jumbo
 * paging is in use) page rings for every rx ring.
 *
 * Returns 0 on success or -ENOMEM.  On partial failure previously
 * allocated pieces are left in place; callers clean up through
 * bnx2_free_mem() (see bnx2_alloc_mem's alloc_mem_err path).
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* vzalloc: the shadow ring can be too big for kmalloc. */
		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		/* Page ring is only needed when rx paging is enabled. */
		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
807
/* Free everything bnx2_alloc_mem() allocated: tx/rx rings, 5709
 * context pages, and the combined status+statistics block.  stats_blk
 * points into the status-block allocation, so it is only NULLed here.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
833
/* Allocate all DMA-coherent memory the device needs: one combined
 * status + statistics block, the 5709 host context pages, and the
 * RX/TX ring memory.  Returns 0 on success or -ENOMEM; on failure all
 * partial allocations are released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* Room for one aligned status block per hardware MSI-X
		 * vector, whether or not all vectors end up used.
		 */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 always uses the base (INTx/MSI) status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each additional vector gets its own aligned
			 * slice of the combined allocation.
			 */
			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block follows the status block(s) in the same
	 * DMA allocation.
	 */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB (0x2000) of host context memory,
		 * allocated one BCM page at a time.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
911
/* Report the current link state to the bootcode/management firmware
 * through the BNX2_LINK_STATUS shared-memory word.  Skipped entirely
 * when the PHY is managed remotely by the firmware.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed + duplex into the firmware's format. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is a latched register; read it twice so
			 * the second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
970
971 static char *
972 bnx2_xceiver_str(struct bnx2 *bp)
973 {
974         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
975                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
976                  "Copper");
977 }
978
/* Log the link transition and update the net_device carrier flag.
 * The "Link is Up" line is assembled with pr_cont(), which is why the
 * initial netdev_info() format deliberately has no trailing newline.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		/* Append the negotiated flow-control directions. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	/* Mirror the new state to the management firmware. */
	bnx2_report_fw_link(bp);
}
1009
/* Resolve the effective flow control (bp->flow_ctrl) for the current
 * link.  When pause autonegotiation is not fully enabled, the user's
 * requested settings apply directly (full duplex only).  Otherwise
 * the local and link-partner pause advertisements are combined per
 * IEEE 802.3 (Table 28B-3).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Pause frames are only meaningful on full duplex. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		/* The 5708 SerDes PHY resolves pause in hardware; read
		 * its result rather than recomputing from advertisements.
		 */
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Translate the 1000BASE-X pause bits into the copper
		 * bit positions so the resolution logic below can be
		 * shared between SerDes and copper.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1085
1086 static int
1087 bnx2_5709s_linkup(struct bnx2 *bp)
1088 {
1089         u32 val, speed;
1090
1091         bp->link_up = 1;
1092
1093         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1094         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1095         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1096
1097         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1098                 bp->line_speed = bp->req_line_speed;
1099                 bp->duplex = bp->req_duplex;
1100                 return 0;
1101         }
1102         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1103         switch (speed) {
1104                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1105                         bp->line_speed = SPEED_10;
1106                         break;
1107                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1108                         bp->line_speed = SPEED_100;
1109                         break;
1110                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1111                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1112                         bp->line_speed = SPEED_1000;
1113                         break;
1114                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1115                         bp->line_speed = SPEED_2500;
1116                         break;
1117         }
1118         if (val & MII_BNX2_GP_TOP_AN_FD)
1119                 bp->duplex = DUPLEX_FULL;
1120         else
1121                 bp->duplex = DUPLEX_HALF;
1122         return 0;
1123 }
1124
1125 static int
1126 bnx2_5708s_linkup(struct bnx2 *bp)
1127 {
1128         u32 val;
1129
1130         bp->link_up = 1;
1131         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1132         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1133                 case BCM5708S_1000X_STAT1_SPEED_10:
1134                         bp->line_speed = SPEED_10;
1135                         break;
1136                 case BCM5708S_1000X_STAT1_SPEED_100:
1137                         bp->line_speed = SPEED_100;
1138                         break;
1139                 case BCM5708S_1000X_STAT1_SPEED_1G:
1140                         bp->line_speed = SPEED_1000;
1141                         break;
1142                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1143                         bp->line_speed = SPEED_2500;
1144                         break;
1145         }
1146         if (val & BCM5708S_1000X_STAT1_FD)
1147                 bp->duplex = DUPLEX_FULL;
1148         else
1149                 bp->duplex = DUPLEX_HALF;
1150
1151         return 0;
1152 }
1153
1154 static int
1155 bnx2_5706s_linkup(struct bnx2 *bp)
1156 {
1157         u32 bmcr, local_adv, remote_adv, common;
1158
1159         bp->link_up = 1;
1160         bp->line_speed = SPEED_1000;
1161
1162         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1163         if (bmcr & BMCR_FULLDPLX) {
1164                 bp->duplex = DUPLEX_FULL;
1165         }
1166         else {
1167                 bp->duplex = DUPLEX_HALF;
1168         }
1169
1170         if (!(bmcr & BMCR_ANENABLE)) {
1171                 return 0;
1172         }
1173
1174         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1175         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1176
1177         common = local_adv & remote_adv;
1178         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1179
1180                 if (common & ADVERTISE_1000XFULL) {
1181                         bp->duplex = DUPLEX_FULL;
1182                 }
1183                 else {
1184                         bp->duplex = DUPLEX_HALF;
1185                 }
1186         }
1187
1188         return 0;
1189 }
1190
/* Derive line speed and duplex for a copper PHY.  With autoneg on,
 * the result comes from the highest common capability of the local
 * and link-partner advertisements (1000, then 100, then 10); with
 * autoneg off, from the forced BMCR bits.  If autoneg resolved no
 * common mode, the link is marked down.  Returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's 1000BASE-T ability bits in MII_STAT1000
		 * sit two positions above our MII_CTRL1000 advertisement
		 * bits, hence the shift before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No GigE match; fall back to 10/100 resolution. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: no usable link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: speed/duplex are forced via BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1256
1257 static void
1258 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1259 {
1260         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1261
1262         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1263         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1264         val |= 0x02 << 8;
1265
1266         if (bp->flow_ctrl & FLOW_CTRL_TX)
1267                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1268
1269         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1270 }
1271
1272 static void
1273 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1274 {
1275         int i;
1276         u32 cid;
1277
1278         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1279                 if (i == 1)
1280                         cid = RX_RSS_CID;
1281                 bnx2_init_rx_context(bp, cid);
1282         }
1283 }
1284
/* Program the EMAC to match the link state already resolved in bp:
 * inter-packet gap, port mode (MII/GMII/2.5G), duplex, and the RX/TX
 * pause enables.  Finishes by acknowledging the MAC link-change
 * interrupt and refreshing the RX contexts, which cache the TX
 * flow-control enable bit.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; 1000 Mbps half duplex needs larger gaps. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Chips after the 5706 have a dedicated
				 * 10M MII mode; the 5706 falls through
				 * and uses plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G also needs GMII, so fall through. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}
1351
1352 static void
1353 bnx2_enable_bmsr1(struct bnx2 *bp)
1354 {
1355         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1356             (CHIP_NUM(bp) == CHIP_NUM_5709))
1357                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1358                                MII_BNX2_BLK_ADDR_GP_STATUS);
1359 }
1360
1361 static void
1362 bnx2_disable_bmsr1(struct bnx2 *bp)
1363 {
1364         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1365             (CHIP_NUM(bp) == CHIP_NUM_5709))
1366                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1367                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1368 }
1369
1370 static int
1371 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1372 {
1373         u32 up1;
1374         int ret = 1;
1375
1376         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1377                 return 0;
1378
1379         if (bp->autoneg & AUTONEG_SPEED)
1380                 bp->advertising |= ADVERTISED_2500baseX_Full;
1381
1382         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1383                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1384
1385         bnx2_read_phy(bp, bp->mii_up1, &up1);
1386         if (!(up1 & BCM5708S_UP1_2G5)) {
1387                 up1 |= BCM5708S_UP1_2G5;
1388                 bnx2_write_phy(bp, bp->mii_up1, up1);
1389                 ret = 0;
1390         }
1391
1392         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1393                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1394                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1395
1396         return ret;
1397 }
1398
1399 static int
1400 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1401 {
1402         u32 up1;
1403         int ret = 0;
1404
1405         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1406                 return 0;
1407
1408         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1409                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1410
1411         bnx2_read_phy(bp, bp->mii_up1, &up1);
1412         if (up1 & BCM5708S_UP1_2G5) {
1413                 up1 &= ~BCM5708S_UP1_2G5;
1414                 bnx2_write_phy(bp, bp->mii_up1, up1);
1415                 ret = 1;
1416         }
1417
1418         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1419                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1420                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1421
1422         return ret;
1423 }
1424
/* Force the SerDes link to 2.5G.  On the 5709 this sets the FORCE and
 * FORCE_2_5G bits in the SERDES_DIG MISC1 register; on the 5708 it
 * sets a vendor bit in BMCR.  Other chips are left untouched.  If
 * speed autoneg was requested, autoneg is additionally switched off
 * in the BMCR that gets written back.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);	/* set whenever err == 0 below */
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* bmcr is only valid if the read above succeeded. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1468
/* Undo bnx2_enable_forced_2g5(): clear the 5709 SERDES_DIG MISC1
 * FORCE bit or the 5708 BMCR vendor bit.  Other chips are untouched.
 * If speed autoneg is requested, autoneg is re-enabled and restarted
 * at 1000 Mbps in the BMCR that gets written back.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);	/* set whenever err == 0 below */
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* bmcr is only valid if the read above succeeded. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1507
1508 static void
1509 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1510 {
1511         u32 val;
1512
1513         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1514         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1515         if (start)
1516                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1517         else
1518                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1519 }
1520
/* Poll the PHY, update the driver's link state (speed, duplex, flow
 * control), report any transition, and reprogram the MAC to match.
 * Returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY links are managed by the firmware, not here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR is latched; read twice so the second read is current.
	 * On 5709 SerDes the enable/disable helpers switch the PHY
	 * block address around the reads.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is latched as well; read it twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On 5706 SerDes, override BMSR's link bit with the
		 * MAC's view combined with the sync status.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Link lost while in parallel detect: go back to
		 * normal autonegotiation.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1604
1605 static int
1606 bnx2_reset_phy(struct bnx2 *bp)
1607 {
1608         int i;
1609         u32 reg;
1610
1611         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1612
1613 #define PHY_RESET_MAX_WAIT 100
1614         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1615                 udelay(10);
1616
1617                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1618                 if (!(reg & BMCR_RESET)) {
1619                         udelay(20);
1620                         break;
1621                 }
1622         }
1623         if (i == PHY_RESET_MAX_WAIT) {
1624                 return -EBUSY;
1625         }
1626         return 0;
1627 }
1628
1629 static u32
1630 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1631 {
1632         u32 adv = 0;
1633
1634         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1635                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1636
1637                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1638                         adv = ADVERTISE_1000XPAUSE;
1639                 }
1640                 else {
1641                         adv = ADVERTISE_PAUSE_CAP;
1642                 }
1643         }
1644         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1645                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1646                         adv = ADVERTISE_1000XPSE_ASYM;
1647                 }
1648                 else {
1649                         adv = ADVERTISE_PAUSE_ASYM;
1650                 }
1651         }
1652         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1653                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1654                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1655                 }
1656                 else {
1657                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1658                 }
1659         }
1660         return adv;
1661 }
1662
1663 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1664
/* Hand link setup to the management firmware (remote PHY): encode the
 * requested autoneg/speed/duplex and pause settings into a
 * BNX2_NETLINK_SET_LINK argument word and issue the SET_LINK command.
 * The phy_lock is dropped around bnx2_fw_sync() — presumably because
 * the firmware handshake may sleep — as the sparse annotations
 * indicate.  Returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Advertise every mode the user enabled. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: exactly one speed/duplex combination. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1723
/* bnx2_setup_serdes_phy - configure link on a serdes (fibre) PHY
 *
 * Handles both forced-speed and autoneg setups for the serdes PHYs;
 * remote-PHY capable devices are delegated to bnx2_setup_remote_phy().
 *
 * @bp:   device context
 * @port: forwarded to the remote-PHY path
 *
 * Caller holds bp->phy_lock; it is released around the msleep() used to
 * make a forced link-down visible to the partner.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability requires a link bounce so
		 * the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear BMCR bit 13 to select 1G on the
				 * 5709S -- vendor magic value; TODO
				 * confirm against 5709S documentation.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Withdraw all 1000X advertisements and
				 * restart autoneg so the partner sees the
				 * drop before the new forced mode lands.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop phy_lock: msleep() may sleep. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1840
/* All fibre speeds this device can advertise via ethtool.  NOTE: expands
 * to an expression that reads a variable named 'bp', so it may only be
 * used inside functions with a struct bnx2 *bp in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds advertised via ethtool (no half duplex at 1000 Mb/s). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement register mask for all 10/100 speed bits (plus the
 * mandatory CSMA selector field).
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register mask for both 1000 speed bits. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1855
1856 static void
1857 bnx2_set_default_remote_link(struct bnx2 *bp)
1858 {
1859         u32 link;
1860
1861         if (bp->phy_port == PORT_TP)
1862                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1863         else
1864                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1865
1866         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1867                 bp->req_line_speed = 0;
1868                 bp->autoneg |= AUTONEG_SPEED;
1869                 bp->advertising = ADVERTISED_Autoneg;
1870                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1871                         bp->advertising |= ADVERTISED_10baseT_Half;
1872                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1873                         bp->advertising |= ADVERTISED_10baseT_Full;
1874                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1875                         bp->advertising |= ADVERTISED_100baseT_Half;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1877                         bp->advertising |= ADVERTISED_100baseT_Full;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1879                         bp->advertising |= ADVERTISED_1000baseT_Full;
1880                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1881                         bp->advertising |= ADVERTISED_2500baseX_Full;
1882         } else {
1883                 bp->autoneg = 0;
1884                 bp->advertising = 0;
1885                 bp->req_duplex = DUPLEX_FULL;
1886                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1887                         bp->req_line_speed = SPEED_10;
1888                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1889                                 bp->req_duplex = DUPLEX_HALF;
1890                 }
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1892                         bp->req_line_speed = SPEED_100;
1893                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1894                                 bp->req_duplex = DUPLEX_HALF;
1895                 }
1896                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1897                         bp->req_line_speed = SPEED_1000;
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1899                         bp->req_line_speed = SPEED_2500;
1900         }
1901 }
1902
1903 static void
1904 bnx2_set_default_link(struct bnx2 *bp)
1905 {
1906         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1907                 bnx2_set_default_remote_link(bp);
1908                 return;
1909         }
1910
1911         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1912         bp->req_line_speed = 0;
1913         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1914                 u32 reg;
1915
1916                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1917
1918                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1919                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1920                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1921                         bp->autoneg = 0;
1922                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1923                         bp->req_duplex = DUPLEX_FULL;
1924                 }
1925         } else
1926                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1927 }
1928
/* bnx2_send_heart_beat - tell the bootcode the driver is alive
 *
 * Advances the driver pulse sequence number and writes it (masked to
 * BNX2_DRV_PULSE_SEQ_MASK) to the BNX2_DRV_PULSE_MB shared-memory
 * mailbox through the PCICFG register window, serialized by
 * indirect_lock.  NOTE(review): plain spin_lock (not _bh) -- assumes
 * callers run in a context where that is safe; confirm at call sites.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1942
/* bnx2_remote_phy_event - handle a link event from the firmware PHY
 *
 * Decodes the BNX2_LINK_STATUS word published in shared memory: link
 * up/down, speed/duplex, flow control, and the active media (serdes vs
 * copper).  Reloads default link settings when the media changed,
 * reports the link if its state changed, and reprograms the MAC.  Also
 * answers firmware heartbeat-expired notifications.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each half-duplex case deliberately falls through to its
		 * full-duplex sibling to pick up the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: forced setting when either autoneg knob
		 * is off, otherwise the firmware-negotiated result.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Media changed (dual-media board): reload defaults. */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2019
2020 static int
2021 bnx2_set_remote_link(struct bnx2 *bp)
2022 {
2023         u32 evt_code;
2024
2025         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2026         switch (evt_code) {
2027                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2028                         bnx2_remote_phy_event(bp);
2029                         break;
2030                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2031                 default:
2032                         bnx2_send_heart_beat(bp);
2033                         break;
2034         }
2035         return 0;
2036 }
2037
/* bnx2_setup_copper_phy - configure link on a copper PHY
 *
 * Autoneg path: rewrites the 10/100 and 1000BASE-T advertisement
 * registers and restarts autonegotiation only when something actually
 * changed.  Forced path: computes the new BMCR, bounces the link first
 * if it is up so the partner notices, then writes the new mode.
 *
 * Caller holds bp->phy_lock; it is released around the msleep() in the
 * forced link-down sequence.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits we may rewrite so the
		 * comparison below ignores unrelated register bits.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex (10 or 100 Mb/s only on this path). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched low; read it twice to get
		 * the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop phy_lock across the sleep. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2136
2137 static int
2138 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 __releases(&bp->phy_lock)
2140 __acquires(&bp->phy_lock)
2141 {
2142         if (bp->loopback == MAC_LOOPBACK)
2143                 return 0;
2144
2145         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146                 return bnx2_setup_serdes_phy(bp, port);
2147         }
2148         else {
2149                 return bnx2_setup_copper_phy(bp);
2150         }
2151 }
2152
/* bnx2_init_5709s_phy - one-time init for the 5709 serdes PHY
 *
 * The 5709S exposes the IEEE MII registers at a 0x10 offset inside its
 * autoneg MMD, so the bp->mii_* shortcuts are remapped first.  The rest
 * walks the PHY's register banks (selected via MII_BNX2_BLK_ADDR) to set
 * fiber mode, 2.5G capability, and BAM/CL73 next-page autoneg options.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point subsequent accesses at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode and disable media auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or withdraw) 2.5G capability in the over-1G block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave bank select on the IEEE block for normal operation. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2202
/* bnx2_init_5708s_phy - one-time init for the 5708 serdes PHY
 *
 * Selects fiber mode with auto-detect, enables PLL early detect, turns
 * on 2.5G advertisement when supported, and applies two hardware tweaks:
 * a TX amplitude bump for early 5708 steppings and an NVRAM-supplied TX
 * control value on backplane boards.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* A non-zero TXCTL3 field in the NVRAM port config is a
	 * board-specific TX control value to program on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2260
/* bnx2_init_5706s_phy - one-time init for the 5706 serdes PHY
 *
 * Clears parallel-detect state and programs packet-length handling for
 * jumbo vs standard MTU through vendor registers 0x18 and 0x1c (magic
 * values supplied by Broadcom -- TODO confirm register-level meaning
 * against the 5706S documentation).  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2298
/* bnx2_init_copper_phy - one-time init for the copper (BASE-T) PHY
 *
 * Applies the CRC workaround and early-DAC disable when flagged, sets or
 * clears extended packet length handling according to MTU, and enables
 * ethernet@wirespeed.  The raw 0x10/0x15/0x17/0x18 accesses are vendor
 * shadow/expansion registers programmed with Broadcom-supplied magic
 * values.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-provided DSP write sequence for a CRC erratum --
		 * TODO confirm exact semantics against Broadcom PHY docs.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2350
2351
2352 static int
2353 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2354 __releases(&bp->phy_lock)
2355 __acquires(&bp->phy_lock)
2356 {
2357         u32 val;
2358         int rc = 0;
2359
2360         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2361         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2362
2363         bp->mii_bmcr = MII_BMCR;
2364         bp->mii_bmsr = MII_BMSR;
2365         bp->mii_bmsr1 = MII_BMSR;
2366         bp->mii_adv = MII_ADVERTISE;
2367         bp->mii_lpa = MII_LPA;
2368
2369         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2370
2371         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2372                 goto setup_phy;
2373
2374         bnx2_read_phy(bp, MII_PHYSID1, &val);
2375         bp->phy_id = val << 16;
2376         bnx2_read_phy(bp, MII_PHYSID2, &val);
2377         bp->phy_id |= val & 0xffff;
2378
2379         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2380                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2381                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2382                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2383                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2384                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2385                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2386         }
2387         else {
2388                 rc = bnx2_init_copper_phy(bp, reset_phy);
2389         }
2390
2391 setup_phy:
2392         if (!rc)
2393                 rc = bnx2_setup_phy(bp, bp->phy_port);
2394
2395         return rc;
2396 }
2397
2398 static int
2399 bnx2_set_mac_loopback(struct bnx2 *bp)
2400 {
2401         u32 mac_mode;
2402
2403         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2404         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2405         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407         bp->link_up = 1;
2408         return 0;
2409 }
2410
2411 static int bnx2_test_link(struct bnx2 *);
2412
/* bnx2_set_phy_loopback - put the PHY into loopback for self-test
 *
 * Forces the PHY into 1000 Mb/s full-duplex loopback, waits up to ~1 s
 * for the link to settle, then programs the EMAC for GMII without MAC
 * loopback or forced link and marks the link up.
 *
 * Takes and releases bp->phy_lock around the PHY write.  Returns the
 * bnx2_write_phy() error code on failure, otherwise 0.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for the loopback link, 100 ms per try. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2442
/* bnx2_fw_sync - send a command to the bootcode and optionally wait
 *
 * @bp:       device context
 * @msg_data: BNX2_DRV_MSG_* command; a fresh sequence number is OR-ed in
 * @ack:      when non-zero, poll shared memory for the firmware ACK
 * @silent:   suppress the timeout error message
 *
 * Sleeps (msleep) while polling, so callers must not hold spinlocks.
 * Returns 0 on success (and unconditionally for WAIT0-stage messages),
 * -EBUSY on ACK timeout (after informing the firmware of the timeout),
 * or -EIO when the firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a new sequence number so the ACK can be
	 * matched to this request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0-stage messages are treated as successful even without an
	 * ACK or OK status.
	 */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2487
/* 5709-family context setup: enable the on-chip context engine and
 * program its host page table with the driver's pre-allocated context
 * block pages (bp->ctx_blk[]), zeroing each page first.
 *
 * Returns 0 on success, -EBUSY when the hardware fails to complete the
 * memory init or a page-table write within the poll window, -ENOMEM
 * when a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and request memory init; encode the
	 * page size (log2 - 8) in bits 16+.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for MEM_INIT to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the page's 64-bit DMA address into the host page
		 * table, then trigger the table write for entry i and
		 * poll for WRITE_REQ to self-clear.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2535
/* Zero the on-chip context memory for all 96 CIDs (used on chips that
 * do not go through bnx2_init_5709_context()).  Each virtual context is
 * mapped to a physical context page and then cleared word by word.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0: remap VCIDs with bit 3 set to a
			 * different physical context ID.  NOTE(review):
			 * presumably a silicon erratum workaround —
			 * confirm against the chip errata sheet.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans several physical pages; map and clear
		 * each one in turn.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2578
/* Drain the hardware RX mbuf allocator, keeping a note of every buffer
 * whose address does NOT have bit 9 set ("good" buffers), then free
 * only those back to the pool.  Buffers with bit 9 set stay allocated
 * forever and are therefore never handed out again.
 *
 * NOTE(review): presumably a workaround for bad RBUF memory blocks on
 * early 5706 silicon — confirm against the caller / errata.
 *
 * Returns 0 on success, -ENOMEM if the scratch array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Scratch space for up to 512 good buffer handles. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the handle in the format the free register
		 * expects (handle in both halves plus the valid bit).
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2629
2630 static void
2631 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2632 {
2633         u32 val;
2634
2635         val = (mac_addr[0] << 8) | mac_addr[1];
2636
2637         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2638
2639         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2640                 (mac_addr[4] << 8) | mac_addr[5];
2641
2642         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2643 }
2644
/* Allocate one page for rx page-ring slot @index, map it for DMA, and
 * write its 64-bit bus address into the corresponding buffer
 * descriptor.  Returns 0 on success, -ENOMEM if the page allocation
 * fails, -EIO if the DMA mapping fails (page is freed).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Record the page and its mapping, then publish the address in
	 * the hardware descriptor (split into hi/lo 32-bit halves).
	 */
	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2669
2670 static void
2671 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2672 {
2673         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2674         struct page *page = rx_pg->page;
2675
2676         if (!page)
2677                 return;
2678
2679         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2680                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2681
2682         __free_page(page);
2683         rx_pg->page = NULL;
2684 }
2685
/* Allocate an skb for rx ring slot @index, align its data area, map it
 * for DMA, and publish the bus address in the slot's buffer descriptor.
 * Also advances rx_prod_bseq by the buffer size (the hardware byte
 * sequence counter).  Returns 0 on success, -ENOMEM or -EIO on
 * allocation / mapping failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* The l2_fhdr the chip writes lands at the start of the buffer,
	 * so cache a pointer to it for the rx path to prefetch/read.
	 */
	rx_buf->skb = skb;
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2721
2722 static int
2723 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2724 {
2725         struct status_block *sblk = bnapi->status_blk.msi;
2726         u32 new_link_state, old_link_state;
2727         int is_set = 1;
2728
2729         new_link_state = sblk->status_attn_bits & event;
2730         old_link_state = sblk->status_attn_bits_ack & event;
2731         if (new_link_state != old_link_state) {
2732                 if (new_link_state)
2733                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2734                 else
2735                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2736         } else
2737                 is_set = 0;
2738
2739         return is_set;
2740 }
2741
/* Service PHY-related attention events flagged in the status block:
 * link-state changes and remote-PHY timer aborts.  Serialized against
 * other PHY users with phy_lock (plain spin_lock — caller is presumably
 * softirq/NAPI context; confirm at the call sites).
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2755
/* Read the hardware tx consumer index from the status block.  When the
 * raw value lands on the last entry of a ring page it is advanced by
 * one (presumably because that slot holds a next-page chain descriptor
 * rather than a packet BD — confirm against the ring setup code).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2769
/* Reclaim completed tx descriptors on this NAPI instance's tx ring:
 * unmap and free each fully-transmitted skb, then wake the matching
 * netdev tx queue if it was stopped and space is available again.
 * Returns the number of packets freed (at most @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* NAPI instance i services tx queue i. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of this packet's last BD; account for the
			 * skipped final ring-page entry on wrap (see
			 * bnx2_get_hw_tx_cons()).
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Not all BDs of this packet are consumed yet —
			 * stop here (signed 16-bit wrap-safe compare).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap every fragment BD following the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index in case more completions
		 * arrived while we were reclaiming.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if stopped and enough descriptors are free;
	 * re-check under the tx lock to close the race with the xmit
	 * path stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2860
/* Recycle @count consumed page-ring entries back to the producer side
 * without allocating new pages: each consumer slot's page, DMA mapping
 * and BD address are moved to the corresponding producer slot.  If @skb
 * is non-NULL, its last page fragment is first detached and returned to
 * the consumer slot, and the skb is freed (used when a replacement page
 * allocation failed mid-packet).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* When prod == cons the slot already holds the right
		 * page/mapping — nothing to move.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2916
/* Recycle an rx buffer: re-post the skb from consumer slot @cons at
 * producer slot @prod instead of passing it up the stack (used on error
 * or when a replacement buffer could not be allocated).  The header
 * area is handed back to the device and the DMA mapping/BD address are
 * moved to the producer slot.  rx_prod_bseq still advances because the
 * buffer is being re-posted to the hardware.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header region (synced for the CPU in the rx loop)
	 * back to the device.
	 */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	/* Same slot: mapping and BD address are already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2947
/* Finish receiving one packet into @skb after its buffer was taken off
 * the rx ring.  @ring_idx packs the producer slot (low 16 bits) and
 * consumer slot (high 16 bits).  @len is the packet length with 4
 * trailing bytes already subtracted by the caller (presumably the CRC —
 * confirm); @hdr_len is non-zero for split packets whose payload tail
 * lives in page-ring fragments.
 *
 * A replacement rx buffer is allocated first; on failure the original
 * buffer (and any pages) are recycled and the error returned, so the
 * ring never loses entries.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		/* No replacement buffer: recycle the skb and, for split
		 * packets, the page-ring entries it would have used.
		 */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Non-split packet: entire payload is in the linear area. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Bytes remaining in page fragments, including the 4
		 * trailing bytes which are stripped from the last page.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only (part of) the stripped trailer is
				 * left: recycle the unused pages and trim
				 * the already-added excess off the skb.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				/* Hand the just-attached page back and
				 * recycle the remaining entries; the skb
				 * is freed inside.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3047
/* Read the hardware rx consumer index from the status block, skipping
 * the last entry of a ring page the same way bnx2_get_hw_tx_cons()
 * does (that slot presumably holds a next-page chain descriptor —
 * confirm against the ring setup code).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3061
/* NAPI rx handler: process up to @budget received packets on this
 * instance's rx ring.  Small packets are copied into a fresh skb and
 * the original buffer recycled; larger ones go through bnx2_rx_skb().
 * Updates the hardware producer indices and byte sequence at the end.
 * Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* Warm the cache for the next entry's l2_fhdr. */
		next_rx_buf =
			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
		prefetch(next_rx_buf->desc);

		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only the header area is synced here; the full buffer
		 * is unmapped later if the packet is kept.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Determine the split point for packets whose payload
		 * continues in the page ring.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling buffer and pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4 trailing bytes (presumably the CRC). */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original buffer in place.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when no TCP/UDP
		 * checksum error bits are set.
		 */
		skb_checksum_none_assert(skb);
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices / byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3222
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3245
/* One-shot MSI ISR: like bnx2_msi() but without the explicit mask
 * write (the one-shot mode presumably masks in hardware — confirm
 * against the interrupt setup code).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3262
/* INTx ISR (possibly shared): verify the interrupt is ours via the
 * status index / INTA line, mask and deassert it, then hand off to
 * NAPI.  Returns IRQ_NONE when the line was raised by another device.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Latch the status index only when we actually win the right
	 * to schedule NAPI (prep fails if it is already scheduled).
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3301
3302 static inline int
3303 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3304 {
3305         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3306         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3307
3308         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3309             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3310                 return 1;
3311         return 0;
3312 }
3313
3314 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3315                                  STATUS_ATTN_BITS_TIMER_ABORT)
3316
3317 static inline int
3318 bnx2_has_work(struct bnx2_napi *bnapi)
3319 {
3320         struct status_block *sblk = bnapi->status_blk.msi;
3321
3322         if (bnx2_has_fast_work(bnapi))
3323                 return 1;
3324
3325 #ifdef BCM_CNIC
3326         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3327                 return 1;
3328 #endif
3329
3330         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3331             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3332                 return 1;
3333
3334         return 0;
3335 }
3336
/* Called periodically from the driver timer.  If work is pending but the
 * status index has not advanced since the last idle check, the MSI was
 * presumably missed; pulse the MSI enable bit and invoke the handler
 * directly to recover.  NOTE(review): looks like a lost-MSI hardware
 * workaround -- confirm against the chip errata.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                /* No progress since the previous idle check. */
                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3358
3359 #ifdef BCM_CNIC
/* Forward the status block to the registered CNIC driver, if any, and
 * record its returned tag.  No-op when CNIC is not attached to this
 * vector.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        /* cnic_ops may be unregistered concurrently; RCU protects the
         * dereference for the duration of the call.
         */
        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
3374 #endif
3375
/* Handle link-related attention events from the status block.  Invoked
 * from the NAPI poll loop.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        /* An event is pending when an attention bit differs from its ack. */
        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
3395
/* Do one pass of TX and RX completion processing for this vector.
 * Returns the updated work_done count.  TX work does not count against
 * the NAPI budget; RX work consumes whatever budget remains.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
                bnx2_tx_int(bp, bnapi, 0);

        if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

        return work_done;
}
3410
/* NAPI poll routine for MSI-X vectors.  Processes TX/RX work until the
 * budget is exhausted or no work remains, then re-enables this vector's
 * interrupt.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        /* Done: leave polling mode and unmask the vector,
                         * acking up to last_status_idx.
                         */
                        napi_complete(napi);
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3437
/* NAPI poll routine for the default (INTx/MSI) vector.  Handles link
 * attention events, TX/RX work and CNIC notifications, then re-enables
 * interrupts once all work is done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                /* MSI/MSI-X: single ack-and-unmask write. */
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: first ack with the line still masked, then
                         * unmask with a second write.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3486
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the receive filters (promiscuous mode, multicast hash,
 * unicast match registers and VLAN tag keeping) from dev->flags and the
 * device's address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with promisc/keep-VLAN cleared. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
        if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
             (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: fill every hash register. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each address into one bit of the 256-bit filter:
                 * low CRC byte selects register (top 3 bits) and bit
                 * position (low 5 bits).
                 */
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Too many unicast addresses for the match registers: fall back
         * to promiscuous mode.
         */
        if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into to the match filter list */
                i = 0;
                netdev_for_each_uc_addr(ha, dev) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }

        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, program, then re-enable the sort-user0 register. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3575
3576 static int __devinit
3577 check_fw_section(const struct firmware *fw,
3578                  const struct bnx2_fw_file_section *section,
3579                  u32 alignment, bool non_empty)
3580 {
3581         u32 offset = be32_to_cpu(section->offset);
3582         u32 len = be32_to_cpu(section->len);
3583
3584         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3585                 return -EINVAL;
3586         if ((non_empty && len == 0) || len > fw->size - offset ||
3587             len & (alignment - 1))
3588                 return -EINVAL;
3589         return 0;
3590 }
3591
3592 static int __devinit
3593 check_mips_fw_entry(const struct firmware *fw,
3594                     const struct bnx2_mips_fw_file_entry *entry)
3595 {
3596         if (check_fw_section(fw, &entry->text, 4, true) ||
3597             check_fw_section(fw, &entry->data, 4, false) ||
3598             check_fw_section(fw, &entry->rodata, 4, false))
3599                 return -EINVAL;
3600         return 0;
3601 }
3602
/* Select and load the MIPS and RV2P firmware images for this chip and
 * validate their section tables.  Returns 0 on success or a negative
 * errno.
 *
 * NOTE(review): on failure after a successful request_firmware() the
 * firmware is not released here -- presumably the caller's error path
 * calls release_firmware(); verify.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
        const char *mips_fw_file, *rv2p_fw_file;
        const struct bnx2_mips_fw_file *mips_fw;
        const struct bnx2_rv2p_fw_file *rv2p_fw;
        int rc;

        /* 5709 needs its own firmware; A0/A1 steppings take a separate
         * RV2P image.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mips_fw_file = FW_MIPS_FILE_09;
                if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5709_A1))
                        rv2p_fw_file = FW_RV2P_FILE_09_Ax;
                else
                        rv2p_fw_file = FW_RV2P_FILE_09;
        } else {
                mips_fw_file = FW_MIPS_FILE_06;
                rv2p_fw_file = FW_RV2P_FILE_06;
        }

        rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
                return rc;
        }

        rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
                return rc;
        }
        /* Sanity-check every section header before trusting the images. */
        mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        if (bp->mips_firmware->size < sizeof(*mips_fw) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
                pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
                return -EINVAL;
        }
        if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
                pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
                return -EINVAL;
        }

        return 0;
}
3654
3655 static u32
3656 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3657 {
3658         switch (idx) {
3659         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3660                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3661                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3662                 break;
3663         }
3664         return rv2p_code;
3665 }
3666
/* Load one RV2P processor's firmware.  Each 64-bit instruction is
 * written as a high/low register pair and then committed at its index
 * via the processor's address/command register.  Fixup records patch
 * individual instructions after the main load.  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Each processor has its own address/command register. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* Stream the image, 8 bytes (one instruction) at a time. */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;
                REG_WR(bp, addr, val);
        }

        /* Apply up to 8 fixup records; loc is a 32-bit word index into
         * the image, so loc/2 addresses the 64-bit instruction.
         */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3726
3727 static int
3728 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3729             const struct bnx2_mips_fw_file_entry *fw_entry)
3730 {
3731         u32 addr, len, file_offset;
3732         __be32 *data;
3733         u32 offset;
3734         u32 val;
3735
3736         /* Halt the CPU. */
3737         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3738         val |= cpu_reg->mode_value_halt;
3739         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3740         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3741
3742         /* Load the Text area. */
3743         addr = be32_to_cpu(fw_entry->text.addr);
3744         len = be32_to_cpu(fw_entry->text.len);
3745         file_offset = be32_to_cpu(fw_entry->text.offset);
3746         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3747
3748         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3749         if (len) {
3750                 int j;
3751
3752                 for (j = 0; j < (len / 4); j++, offset += 4)
3753                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3754         }
3755
3756         /* Load the Data area. */
3757         addr = be32_to_cpu(fw_entry->data.addr);
3758         len = be32_to_cpu(fw_entry->data.len);
3759         file_offset = be32_to_cpu(fw_entry->data.offset);
3760         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3761
3762         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3763         if (len) {
3764                 int j;
3765
3766                 for (j = 0; j < (len / 4); j++, offset += 4)
3767                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3768         }
3769
3770         /* Load the Read-Only area. */
3771         addr = be32_to_cpu(fw_entry->rodata.addr);
3772         len = be32_to_cpu(fw_entry->rodata.len);
3773         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3774         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3775
3776         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3777         if (len) {
3778                 int j;
3779
3780                 for (j = 0; j < (len / 4); j++, offset += 4)
3781                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3782         }
3783
3784         /* Clear the pre-fetch instruction. */
3785         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3786
3787         val = be32_to_cpu(fw_entry->start_addr);
3788         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3789
3790         /* Start the CPU. */
3791         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3792         val &= ~cpu_reg->mode_value_halt;
3793         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3794         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3795
3796         return 0;
3797 }
3798
3799 static int
3800 bnx2_init_cpus(struct bnx2 *bp)
3801 {
3802         const struct bnx2_mips_fw_file *mips_fw =
3803                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3804         const struct bnx2_rv2p_fw_file *rv2p_fw =
3805                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3806         int rc;
3807
3808         /* Initialize the RV2P processor. */
3809         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3810         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3811
3812         /* Initialize the RX Processor. */
3813         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3814         if (rc)
3815                 goto init_cpu_err;
3816
3817         /* Initialize the TX Processor. */
3818         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3819         if (rc)
3820                 goto init_cpu_err;
3821
3822         /* Initialize the TX Patch-up Processor. */
3823         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3824         if (rc)
3825                 goto init_cpu_err;
3826
3827         /* Initialize the Completion Processor. */
3828         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3829         if (rc)
3830                 goto init_cpu_err;
3831
3832         /* Initialize the Command Processor. */
3833         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3834
3835 init_cpu_err:
3836         return rc;
3837 }
3838
/* Transition the chip between D0 and D3hot power states.  For D3hot
 * with Wake-on-LAN, the MAC is reprogrammed for low-speed autoneg (on
 * copper), multicast reception is opened up, and the bootcode is told
 * which suspend mode to use.  Returns -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Clear the power-state field and any pending PME status. */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Leave magic-packet mode and disable ACPI pattern match. */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Temporarily force low-speed advertisement on
                         * copper while setting up the PHY for suspend.
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        /* Restore the user-configured settings. */
                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Disable, program, re-enable sort-user0. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the bootcode which suspend mode to use. */
                if (!(bp->flags & BNX2_FLAG_NO_WOL))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
                                     1, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                /* NOTE(review): 5706 A0/A1 only set the D3hot state bits
                 * when WOL is enabled -- presumably an erratum
                 * workaround; confirm against the chip errata.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
3976
3977 static int
3978 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3979 {
3980         u32 val;
3981         int j;
3982
3983         /* Request access to the flash interface. */
3984         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3985         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3986                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3987                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3988                         break;
3989
3990                 udelay(5);
3991         }
3992
3993         if (j >= NVRAM_TIMEOUT_COUNT)
3994                 return -EBUSY;
3995
3996         return 0;
3997 }
3998
3999 static int
4000 bnx2_release_nvram_lock(struct bnx2 *bp)
4001 {
4002         int j;
4003         u32 val;
4004
4005         /* Relinquish nvram interface. */
4006         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4007
4008         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4009                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4010                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4011                         break;
4012
4013                 udelay(5);
4014         }
4015
4016         if (j >= NVRAM_TIMEOUT_COUNT)
4017                 return -EBUSY;
4018
4019         return 0;
4020 }
4021
4022
/* Enable NVRAM writes via the PCI write-enable bit; for flash parts
 * that require it (BNX2_NV_WREN), also issue a WREN command and poll
 * for completion.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

        if (bp->flash_info->flags & BNX2_NV_WREN) {
                int j;

                /* DONE must be cleared before issuing a new command. */
                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
                REG_WR(bp, BNX2_NVM_COMMAND,
                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                        udelay(5);

                        val = REG_RD(bp, BNX2_NVM_COMMAND);
                        if (val & BNX2_NVM_COMMAND_DONE)
                                break;
                }

                if (j >= NVRAM_TIMEOUT_COUNT)
                        return -EBUSY;
        }
        return 0;
}
4051
4052 static void
4053 bnx2_disable_nvram_write(struct bnx2 *bp)
4054 {
4055         u32 val;
4056
4057         val = REG_RD(bp, BNX2_MISC_CFG);
4058         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4059 }
4060
4061
4062 static void
4063 bnx2_enable_nvram_access(struct bnx2 *bp)
4064 {
4065         u32 val;
4066
4067         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4068         /* Enable both bits, even on read. */
4069         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4070                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4071 }
4072
4073 static void
4074 bnx2_disable_nvram_access(struct bnx2 *bp)
4075 {
4076         u32 val;
4077
4078         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4079         /* Disable both bits, even after read. */
4080         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4081                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4082                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4083 }
4084
/* Erase one NVRAM page at @offset.  Buffered flash parts need no erase.
 * Returns 0 on success or -EBUSY if the command does not complete in
 * time.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4124
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian).  @cmd_flags selects first/last cycle behavior.  Returns
 * 0 on success or -EBUSY if the command does not complete in time.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate an offset of a buffered flash, not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        /* Copy via memcpy to avoid alignment assumptions
                         * about ret_val.
                         */
                        __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
                        memcpy(ret_val, &v, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4168
4169
4170 static int
4171 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4172 {
4173         u32 cmd;
4174         __be32 val32;
4175         int j;
4176
4177         /* Build the command word. */
4178         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4179
4180         /* Calculate an offset of a buffered flash, not needed for 5709. */
4181         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4182                 offset = ((offset / bp->flash_info->page_size) <<
4183                           bp->flash_info->page_bits) +
4184                          (offset % bp->flash_info->page_size);
4185         }
4186
4187         /* Need to clear DONE bit separately. */
4188         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4189
4190         memcpy(&val32, val, 4);
4191
4192         /* Write the data. */
4193         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4194
4195         /* Address of the NVRAM to write to. */
4196         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4197
4198         /* Issue the write command. */
4199         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4200
4201         /* Wait for completion. */
4202         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4203                 udelay(5);
4204
4205                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4206                         break;
4207         }
4208         if (j >= NVRAM_TIMEOUT_COUNT)
4209                 return -EBUSY;
4210
4211         return 0;
4212 }
4213
/* Identify the flash/EEPROM device behind the NVRAM interface and
 * record its spec in bp->flash_info and its size in bp->flash_size.
 *
 * Returns 0 on success, -ENODEV if no flash_table entry matches the
 * strapping, or the error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc = 0;
        const struct flash_spec *flash;

        /* 5709 uses a single fixed flash spec; no strap decoding. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->flash_info = &flash_5709;
                goto get_flash_size;
        }

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = ARRAY_SIZE(flash_table);

        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        /* Match the backup-strap bits of CFG1 against the
                         * table entry's config1. */
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                /* Bit 23 selects which strap mask applies. */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                        j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        /* Neither loop matched: unknown part. */
        if (j == entry_count) {
                bp->flash_info = NULL;
                pr_alert("Unknown flash/EEPROM type\n");
                return -ENODEV;
        }

get_flash_size:
        /* Prefer the NVM size advertised in shared memory; fall back to
         * the flash spec's total_size when shmem reports none. */
        val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
4296
4297 static int
4298 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4299                 int buf_size)
4300 {
4301         int rc = 0;
4302         u32 cmd_flags, offset32, len32, extra;
4303
4304         if (buf_size == 0)
4305                 return 0;
4306
4307         /* Request access to the flash interface. */
4308         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4309                 return rc;
4310
4311         /* Enable access to flash interface */
4312         bnx2_enable_nvram_access(bp);
4313
4314         len32 = buf_size;
4315         offset32 = offset;
4316         extra = 0;
4317
4318         cmd_flags = 0;
4319
4320         if (offset32 & 3) {
4321                 u8 buf[4];
4322                 u32 pre_len;
4323
4324                 offset32 &= ~3;
4325                 pre_len = 4 - (offset & 3);
4326
4327                 if (pre_len >= len32) {
4328                         pre_len = len32;
4329                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4330                                     BNX2_NVM_COMMAND_LAST;
4331                 }
4332                 else {
4333                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4334                 }
4335
4336                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4337
4338                 if (rc)
4339                         return rc;
4340
4341                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4342
4343                 offset32 += 4;
4344                 ret_buf += pre_len;
4345                 len32 -= pre_len;
4346         }
4347         if (len32 & 3) {
4348                 extra = 4 - (len32 & 3);
4349                 len32 = (len32 + 4) & ~3;
4350         }
4351
4352         if (len32 == 4) {
4353                 u8 buf[4];
4354
4355                 if (cmd_flags)
4356                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4357                 else
4358                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4359                                     BNX2_NVM_COMMAND_LAST;
4360
4361                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4362
4363                 memcpy(ret_buf, buf, 4 - extra);
4364         }
4365         else if (len32 > 0) {
4366                 u8 buf[4];
4367
4368                 /* Read the first word. */
4369                 if (cmd_flags)
4370                         cmd_flags = 0;
4371                 else
4372                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4373
4374                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4375
4376                 /* Advance to the next dword. */
4377                 offset32 += 4;
4378                 ret_buf += 4;
4379                 len32 -= 4;
4380
4381                 while (len32 > 4 && rc == 0) {
4382                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4383
4384                         /* Advance to the next dword. */
4385                         offset32 += 4;
4386                         ret_buf += 4;
4387                         len32 -= 4;
4388                 }
4389
4390                 if (rc)
4391                         return rc;
4392
4393                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4394                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4395
4396                 memcpy(ret_buf, buf, 4 - extra);
4397         }
4398
4399         /* Disable access to flash interface */
4400         bnx2_disable_nvram_access(bp);
4401
4402         bnx2_release_nvram_lock(bp);
4403
4404         return rc;
4405 }
4406
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned offset/length are handled by first reading back the partial
 * dwords at each end into an aligned bounce buffer.  For non-buffered
 * flash, each affected page is read in full, erased, and rewritten.
 * The NVRAM lock is acquired and released once per page iteration.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths that jump to nvram_write_end from inside
 * the per-page loop (after bnx2_acquire_nvram_lock succeeded) exit
 * without disabling NVRAM access or releasing the lock — verify
 * whether cleanup is needed on those paths.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        /* Leading misalignment: widen the range down to a dword boundary
         * and fetch the existing bytes that must be preserved. */
        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        /* Trailing misalignment: widen the range up to a dword boundary
         * and fetch the existing tail bytes. */
        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        /* Build an aligned bounce buffer: preserved head + new data +
         * preserved tail. */
        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        /* Non-buffered flash needs a scratch buffer to hold a whole page
         * across the erase. */
        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        /* Restore the page contents preceding the new data. */
                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        /* LAST flag goes on the final dword of the page, or
                         * of the data range for buffered flash. */
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
4586
4587 static void
4588 bnx2_init_fw_cap(struct bnx2 *bp)
4589 {
4590         u32 val, sig = 0;
4591
4592         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4593         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4594
4595         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4596                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4597
4598         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4599         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4600                 return;
4601
4602         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4603                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4604                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4605         }
4606
4607         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4608             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4609                 u32 link;
4610
4611                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4612
4613                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4614                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4615                         bp->phy_port = PORT_FIBRE;
4616                 else
4617                         bp->phy_port = PORT_TP;
4618
4619                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4620                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4621         }
4622
4623         if (netif_running(bp->dev) && sig)
4624                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4625 }
4626
/* Map the MSI-X table and PBA through the PCI GRC windows so the host
 * can reach them via the register BAR. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        /* Switch the GRC window into separate-window mode first. */
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4635
/* Soft-reset the chip, coordinating with the bootcode firmware.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value sent to the firmware so it
 *              knows the reason for the reset.
 *
 * Returns 0 on success, -EBUSY if the reset does not complete, -ENODEV
 * if byte-swapping comes up wrong, or the error from bnx2_fw_sync().
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
                       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
                val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
                udelay(5);
        } else {  /* 5709 */
                /* Disable DMA, then poll (up to ~100 ms) until no PCI
                 * transactions are pending. */
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

                for (i = 0; i < 100; i++) {
                        msleep(1);
                        val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
                        if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
                                break;
                }
        }

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 resets via MISC_COMMAND, then restores register
                 * window and word-swap configuration. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        pr_err("Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                pr_err("Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
        if (rc)
                return rc;

        /* Re-read firmware capabilities; the PHY port may change across
         * a reset when a remote PHY is in use. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_fw_cap(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                bnx2_setup_msix_tbl(bp);
                /* Prevent MSIX table reads and write from timing out */
                REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
                        BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
        }

        return rc;
}
4760
4761 static int
4762 bnx2_init_chip(struct bnx2 *bp)
4763 {
4764         u32 val, mtu;
4765         int rc, i;
4766
4767         /* Make sure the interrupt is not active. */
4768         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4769
4770         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4771               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4772 #ifdef __BIG_ENDIAN
4773               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4774 #endif
4775               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4776               DMA_READ_CHANS << 12 |
4777               DMA_WRITE_CHANS << 16;
4778
4779         val |= (0x2 << 20) | (1 << 11);
4780
4781         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4782                 val |= (1 << 23);
4783
4784         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4785             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4786                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4787
4788         REG_WR(bp, BNX2_DMA_CONFIG, val);
4789
4790         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4791                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4792                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4793                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4794         }
4795
4796         if (bp->flags & BNX2_FLAG_PCIX) {
4797                 u16 val16;
4798
4799                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4800                                      &val16);
4801                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4802                                       val16 & ~PCI_X_CMD_ERO);
4803         }
4804
4805         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4806                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4807                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4808                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4809
4810         /* Initialize context mapping and zero out the quick contexts.  The
4811          * context block must have already been enabled. */
4812         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4813                 rc = bnx2_init_5709_context(bp);
4814                 if (rc)
4815                         return rc;
4816         } else
4817                 bnx2_init_context(bp);
4818
4819         if ((rc = bnx2_init_cpus(bp)) != 0)
4820                 return rc;
4821
4822         bnx2_init_nvram(bp);
4823
4824         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4825
4826         val = REG_RD(bp, BNX2_MQ_CONFIG);
4827         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4828         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4829         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4830                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4831                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4832                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4833         }
4834
4835         REG_WR(bp, BNX2_MQ_CONFIG, val);
4836
4837         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4838         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4839         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4840
4841         val = (BCM_PAGE_BITS - 8) << 24;
4842         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4843
4844         /* Configure page size. */
4845         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4846         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4847         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4848         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4849
4850         val = bp->mac_addr[0] +
4851               (bp->mac_addr[1] << 8) +
4852               (bp->mac_addr[2] << 16) +
4853               bp->mac_addr[3] +
4854               (bp->mac_addr[4] << 8) +
4855               (bp->mac_addr[5] << 16);
4856         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4857
4858         /* Program the MTU.  Also include 4 bytes for CRC32. */
4859         mtu = bp->dev->mtu;
4860         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4861         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4862                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4863         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4864
4865         if (mtu < 1500)
4866                 mtu = 1500;
4867
4868         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4869         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4870         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4871
4872         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4873         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4874                 bp->bnx2_napi[i].last_status_idx = 0;
4875
4876         bp->idle_chk_status_idx = 0xffff;
4877
4878         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4879
4880         /* Set up how to generate a link change interrupt. */
4881         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4882
4883         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4884                (u64) bp->status_blk_mapping & 0xffffffff);
4885         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4886
4887         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4888                (u64) bp->stats_blk_mapping & 0xffffffff);
4889         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4890                (u64) bp->stats_blk_mapping >> 32);
4891
4892         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4893                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4894
4895         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4896                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4897
4898         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4899                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4900
4901         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4902
4903         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4904
4905         REG_WR(bp, BNX2_HC_COM_TICKS,
4906                (bp->com_ticks_int << 16) | bp->com_ticks);
4907
4908         REG_WR(bp, BNX2_HC_CMD_TICKS,
4909                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4910
4911         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4912                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4913         else
4914                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4915         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4916
4917         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4918                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4919         else {
4920                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4921                       BNX2_HC_CONFIG_COLLECT_STATS;
4922         }
4923
4924         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4925                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4926                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4927
4928                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4929         }
4930
4931         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4932                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4933
4934         REG_WR(bp, BNX2_HC_CONFIG, val);
4935
4936         if (bp->rx_ticks < 25)
4937                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4938         else
4939                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4940
4941         for (i = 1; i < bp->irq_nvecs; i++) {
4942                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4943                            BNX2_HC_SB_CONFIG_1;
4944
4945                 REG_WR(bp, base,
4946                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4947                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4948                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4949
4950                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4951                         (bp->tx_quick_cons_trip_int << 16) |
4952                          bp->tx_quick_cons_trip);
4953
4954                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4955                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4956
4957                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4958                        (bp->rx_quick_cons_trip_int << 16) |
4959                         bp->rx_quick_cons_trip);
4960
4961                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4962                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
4963         }
4964
4965         /* Clear internal stats counters. */
4966         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4967
4968         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4969
4970         /* Initialize the receive filter. */
4971         bnx2_set_rx_mode(bp->dev);
4972
4973         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4974                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4975                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4976                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4977         }
4978         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4979                           1, 0);
4980
4981         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4982         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4983
4984         udelay(20);
4985
4986         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4987
4988         return rc;
4989 }
4990
4991 static void
4992 bnx2_clear_ring_states(struct bnx2 *bp)
4993 {
4994         struct bnx2_napi *bnapi;
4995         struct bnx2_tx_ring_info *txr;
4996         struct bnx2_rx_ring_info *rxr;
4997         int i;
4998
4999         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5000                 bnapi = &bp->bnx2_napi[i];
5001                 txr = &bnapi->tx_ring;
5002                 rxr = &bnapi->rx_ring;
5003
5004                 txr->tx_cons = 0;
5005                 txr->hw_tx_cons = 0;
5006                 rxr->rx_prod_bseq = 0;
5007                 rxr->rx_prod = 0;
5008                 rxr->rx_cons = 0;
5009                 rxr->rx_pg_prod = 0;
5010                 rxr->rx_pg_cons = 0;
5011         }
5012 }
5013
/* Program the chip's L2 TX context for @cid: context type/size, the
 * command-type word, and the 64-bit bus address of the tx BD ring.
 * The 5709 uses a different (XI) set of context offsets than the
 * 5706/5708.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* (8 << 16) sets a field in bits 23:16 of the command-type
	 * word; presumably a BD size/count per the chip spec -- confirm.
	 */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* High and low halves of the tx BD ring's DMA address. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5043
5044 static void
5045 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5046 {
5047         struct tx_bd *txbd;
5048         u32 cid = TX_CID;
5049         struct bnx2_napi *bnapi;
5050         struct bnx2_tx_ring_info *txr;
5051
5052         bnapi = &bp->bnx2_napi[ring_num];
5053         txr = &bnapi->tx_ring;
5054
5055         if (ring_num == 0)
5056                 cid = TX_CID;
5057         else
5058                 cid = TX_TSS_CID + ring_num - 1;
5059
5060         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5061
5062         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5063
5064         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5065         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5066
5067         txr->tx_prod = 0;
5068         txr->tx_prod_bseq = 0;
5069
5070         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5071         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5072
5073         bnx2_init_tx_context(bp, cid, txr);
5074 }
5075
5076 static void
5077 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5078                      int num_rings)
5079 {
5080         int i;
5081         struct rx_bd *rxbd;
5082
5083         for (i = 0; i < num_rings; i++) {
5084                 int j;
5085
5086                 rxbd = &rx_ring[i][0];
5087                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5088                         rxbd->rx_bd_len = buf_size;
5089                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5090                 }
5091                 if (i == (num_rings - 1))
5092                         j = 0;
5093                 else
5094                         j = i + 1;
5095                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5096                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5097         }
5098 }
5099
/* Build and populate rx ring @ring_num: program the chip's L2 RX
 * context, chain the BD pages, fill the ring (and the optional
 * page-based jumbo ring) with buffers, and publish the initial
 * producer indices to the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX context; RSS rings get their own. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page (jumbo) ring attached to this context. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Chain the page BD ring and describe the header-buffer
		 * size / page size split to the context.
		 */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Bus address of the first rx BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; allocation may fail part-way, in
	 * which case we warn and run with a shorter ring.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5185
/* Initialize every tx and rx ring and, when multiple rings are in use,
 * program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the rx rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Fill the RSS indirection table: entry i is the 4-bit
		 * value i % (num_rx_rings - 1); eight entries are packed
		 * into each 32-bit word and written one word at a time.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for IPv4 and IPv6. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5232
5233 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5234 {
5235         u32 max, num_rings = 1;
5236
5237         while (ring_size > MAX_RX_DESC_CNT) {
5238                 ring_size -= MAX_RX_DESC_CNT;
5239                 num_rings++;
5240         }
5241         /* round to next power of 2 */
5242         max = max_size;
5243         while ((max & num_rings) == 0)
5244                 max >>= 1;
5245
5246         if (num_rings != max)
5247                 max <<= 1;
5248
5249         return max;
5250 }
5251
/* Derive all rx buffer and ring geometry from @size (the requested
 * number of rx buffers) and the current MTU, deciding whether the
 * page-based jumbo ring is needed.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint if the whole frame lived in one buffer. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	/* Frame won't fit a single page: use a small header buffer plus
	 * page-sized chunks, unless the chip's jumbo path is broken.
	 */
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame; the 40 presumably discounts
		 * header bytes kept in the header buffer -- TODO confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In jumbo mode the first buffer only holds the header
		 * portion, and copybreak is disabled.
		 */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5290
/* Unmap and free every skb still held by the tx rings.  Safe to call
 * on rings that were never allocated (tx_buf_ring == NULL).
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j is advanced inside the body: one slot for the skb
		 * head plus one slot per page fragment.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Unmap the linear (head) part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Then each fragment, one BD slot apiece. */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5334
/* Unmap and free every rx skb and rx page still held by the rx rings. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): the tx counterpart skips with continue;
		 * returning here assumes no later rx ring can be
		 * allocated when an earlier one is not -- confirm.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release the page (jumbo) ring entries, if any. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5368
/* Release all driver-owned tx and rx buffers (shutdown/reset path). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5375
5376 static int
5377 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5378 {
5379         int rc;
5380
5381         rc = bnx2_reset_chip(bp, reset_code);
5382         bnx2_free_skbs(bp);
5383         if (rc)
5384                 return rc;
5385
5386         if ((rc = bnx2_init_chip(bp)) != 0)
5387                 return rc;
5388
5389         bnx2_init_all_rings(bp);
5390         return 0;
5391 }
5392
5393 static int
5394 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5395 {
5396         int rc;
5397
5398         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5399                 return rc;
5400
5401         spin_lock_bh(&bp->phy_lock);
5402         bnx2_init_phy(bp, reset_phy);
5403         bnx2_set_link(bp);
5404         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5405                 bnx2_remote_phy_event(bp);
5406         spin_unlock_bh(&bp->phy_lock);
5407         return 0;
5408 }
5409
5410 static int
5411 bnx2_shutdown_chip(struct bnx2 *bp)
5412 {
5413         u32 reset_code;
5414
5415         if (bp->flags & BNX2_FLAG_NO_WOL)
5416                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5417         else if (bp->wol)
5418                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5419         else
5420                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5421
5422         return bnx2_reset_chip(bp, reset_code);
5423 }
5424
/* Register self-test: for each table entry, write 0 and 0xffffffff and
 * verify that exactly the read/write bits (rw_mask) change and the
 * read-only bits (ro_mask) are preserved.  The original value is
 * restored in all cases.  Returns 0 on success, -ENODEV on the first
 * failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, writable-bits mask, read-only-bits mask } */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* 0xffff terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip 5706/5708-only registers on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Writing 0 must clear all writable bits... */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* ...and must not disturb the read-only bits. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all-ones must set all writable bits... */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		/* ...again without disturbing the read-only bits. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5595
5596 static int
5597 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5598 {
5599         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5600                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5601         int i;
5602
5603         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5604                 u32 offset;
5605
5606                 for (offset = 0; offset < size; offset += 4) {
5607
5608                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5609
5610                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5611                                 test_pattern[i]) {
5612                                 return -ENODEV;
5613                         }
5614                 }
5615         }
5616         return 0;
5617 }
5618
5619 static int
5620 bnx2_test_memory(struct bnx2 *bp)
5621 {
5622         int ret = 0;
5623         int i;
5624         static struct mem_entry {
5625                 u32   offset;
5626                 u32   len;
5627         } mem_tbl_5706[] = {
5628                 { 0x60000,  0x4000 },
5629                 { 0xa0000,  0x3000 },
5630                 { 0xe0000,  0x4000 },
5631                 { 0x120000, 0x4000 },
5632                 { 0x1a0000, 0x4000 },
5633                 { 0x160000, 0x4000 },
5634                 { 0xffffffff, 0    },
5635         },
5636         mem_tbl_5709[] = {
5637                 { 0x60000,  0x4000 },
5638                 { 0xa0000,  0x3000 },
5639                 { 0xe0000,  0x4000 },
5640                 { 0x120000, 0x4000 },
5641                 { 0x1a0000, 0x4000 },
5642                 { 0xffffffff, 0    },
5643         };
5644         struct mem_entry *mem_tbl;
5645
5646         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5647                 mem_tbl = mem_tbl_5709;
5648         else
5649                 mem_tbl = mem_tbl_5706;
5650
5651         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5652                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5653                         mem_tbl[i].len)) != 0) {
5654                         return ret;
5655                 }
5656         }
5657
5658         return ret;
5659 }
5660
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Send one self-addressed frame through the chip in MAC or PHY
 * loopback and verify it is received back intact on ring 0.  Returns 0
 * on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on setup
 * failure, or -ENODEV if the frame is lost or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always uses ring 0 for both directions. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback is skipped (treated as passing) when the
		 * PHY is remotely managed.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type area, then an incrementing byte pattern after the
	 * 14-byte Ethernet header.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status block update so the current rx consumer index
	 * can be sampled before sending.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx BD describing the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the doorbell: producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status update to pick up tx/rx completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged as errored. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length must match (the 4 is presumably the trailing
	 * CRC included in l2_fhdr_pkt_len -- confirm).
	 */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte by byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5795
5796 #define BNX2_MAC_LOOPBACK_FAILED        1
5797 #define BNX2_PHY_LOOPBACK_FAILED        2
5798 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5799                                          BNX2_PHY_LOOPBACK_FAILED)
5800
5801 static int
5802 bnx2_test_loopback(struct bnx2 *bp)
5803 {
5804         int rc = 0;
5805
5806         if (!netif_running(bp->dev))
5807                 return BNX2_LOOPBACK_FAILED;
5808
5809         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5810         spin_lock_bh(&bp->phy_lock);
5811         bnx2_init_phy(bp, 1);
5812         spin_unlock_bh(&bp->phy_lock);
5813         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5814                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5815         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5816                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5817         return rc;
5818 }
5819
5820 #define NVRAM_SIZE 0x200
5821 #define CRC32_RESIDUAL 0xdebb20e3
5822
5823 static int
5824 bnx2_test_nvram(struct bnx2 *bp)
5825 {
5826         __be32 buf[NVRAM_SIZE / 4];
5827         u8 *data = (u8 *) buf;
5828         int rc = 0;
5829         u32 magic, csum;
5830
5831         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5832                 goto test_nvram_done;
5833
5834         magic = be32_to_cpu(buf[0]);
5835         if (magic != 0x669955aa) {
5836                 rc = -ENODEV;
5837                 goto test_nvram_done;
5838         }
5839
5840         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5841                 goto test_nvram_done;
5842
5843         csum = ether_crc_le(0x100, data);
5844         if (csum != CRC32_RESIDUAL) {
5845                 rc = -ENODEV;
5846                 goto test_nvram_done;
5847         }
5848
5849         csum = ether_crc_le(0x100, data + 0x100);
5850         if (csum != CRC32_RESIDUAL) {
5851                 rc = -ENODEV;
5852         }
5853
5854 test_nvram_done:
5855         return rc;
5856 }
5857
5858 static int
5859 bnx2_test_link(struct bnx2 *bp)
5860 {
5861         u32 bmsr;
5862
5863         if (!netif_running(bp->dev))
5864                 return -ENODEV;
5865
5866         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5867                 if (bp->link_up)
5868                         return 0;
5869                 return -ENODEV;
5870         }
5871         spin_lock_bh(&bp->phy_lock);
5872         bnx2_enable_bmsr1(bp);
5873         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5874         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5875         bnx2_disable_bmsr1(bp);
5876         spin_unlock_bh(&bp->phy_lock);
5877
5878         if (bmsr & BMSR_LSTATUS) {
5879                 return 0;
5880         }
5881         return -ENODEV;
5882 }
5883
5884 static int
5885 bnx2_test_intr(struct bnx2 *bp)
5886 {
5887         int i;
5888         u16 status_idx;
5889
5890         if (!netif_running(bp->dev))
5891                 return -ENODEV;
5892
5893         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5894
5895         /* This register is not touched during run-time. */
5896         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5897         REG_RD(bp, BNX2_HC_COMMAND);
5898
5899         for (i = 0; i < 10; i++) {
5900                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5901                         status_idx) {
5902
5903                         break;
5904                 }
5905
5906                 msleep_interruptible(10);
5907         }
5908         if (i < 10)
5909                 return 0;
5910
5911         return -ENODEV;
5912 }
5913
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled on this configuration. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the mode-control shadow register and check signal detect;
	 * no signal means no possible link.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read the AN debug shadow twice; the first read may return a
	 * stale latched value, the second gives the current state.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Out of sync or invalid RUDI -> treat as no link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	/* Signal present, in sync, and not receiving config words:
	 * the partner looks like a forced-speed link.
	 */
	return 1;
}
5945
/* Periodic SerDes link maintenance for the 5706: when autoneg fails to
 * bring the link up, try parallel detection (force 1G full duplex); when
 * a parallel-detected partner later appears to autonegotiate, switch back
 * to autoneg; and when sync is lost on an up link, force the link down so
 * it can be re-evaluated.  All PHY access is done under bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was recently (re)started; skip the link check and
		 * give it more timer ticks to complete.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg enabled but no link: if the partner looks
			 * like a forced-speed link, disable autoneg and
			 * force 1G full duplex (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detection.  NOTE(review): regs
		 * 0x17/0x15 and bit 0x20 are undocumented here; presumably
		 * the bit indicates the partner has started autonegotiating
		 * -- confirm against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner is negotiating: re-enable autoneg and
			 * leave parallel-detect mode.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of the AN debug shadow: the first read may
		 * return a stale latched value, the second is current.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link is reported up but we have lost sync: force
			 * the link down once, then on subsequent ticks let
			 * bnx2_set_link() re-evaluate.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6007
6008 static void
6009 bnx2_5708_serdes_timer(struct bnx2 *bp)
6010 {
6011         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6012                 return;
6013
6014         if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6015                 bp->serdes_an_pending = 0;
6016                 return;
6017         }
6018
6019         spin_lock(&bp->phy_lock);
6020         if (bp->serdes_an_pending)
6021                 bp->serdes_an_pending--;
6022         else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6023                 u32 bmcr;
6024
6025                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6026                 if (bmcr & BMCR_ANENABLE) {
6027                         bnx2_enable_forced_2g5(bp);
6028                         bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6029                 } else {