c281218f6d5e0b95f359663bfe757a1c599e5a18
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
/* Driver identity strings used in logs, ethtool output and MODULE_VERSION. */
#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.9"
#define DRV_MODULE_RELDATE	"April 27, 2010"
/* Firmware images fetched with request_firmware().  The "06" images serve
 * the 5706/5708 family, the "09" images the 5709/5716 family; the "09ax"
 * RV2P image is presumably for early (Ax) 5709 steppings — see the
 * CHIP_REV check where it is selected.
 */
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* Banner printed once at module load. */
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Set disable_msi=1 to force legacy INTx interrupts on problem systems. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board identifiers.  The values index board_info[] below and are stored
 * in the driver_data field of bnx2_pci_tbl, so the enum order and the
 * board_info[] order must stay in sync.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* Human-readable board names, indexed by board_t above — do not reorder
 * independently of the enum.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
/* PCI IDs this driver binds to.  HP OEM boards (NC370x) are matched first
 * by subsystem vendor/device so they get their OEM name; the PCI_ANY_ID
 * catch-all entries for the same device IDs must come after them.
 * driver_data carries the board_t index.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_* symbol; raw device IDs used. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
148
/* NVRAM descriptor table for pre-5709 chips.  Each entry holds the strap
 * value, hardware configuration words, access flags, page geometry and a
 * name for one supported flash/EEPROM part.  The hex configuration words
 * are opaque hardware programming values — do not edit them.  "Expansion"
 * entries are placeholders for strap codes with no known part.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
237
/* 5709-family NVRAM descriptor; the 5709 does not use flash_table[]. */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

/* Forward declaration; defined later in this file. */
static void bnx2_init_napi(struct bnx2 *bp);
250
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253         u32 diff;
254
255         smp_mb();
256
257         /* The ring uses 256 indices for 255 entries, one of them
258          * needs to be skipped.
259          */
260         diff = txr->tx_prod - txr->tx_cons;
261         if (unlikely(diff >= TX_DESC_CNT)) {
262                 diff &= 0xffff;
263                 if (diff == TX_DESC_CNT)
264                         diff = MAX_TX_DESC_CNT;
265         }
266         return (bp->tx_ring_size - diff);
267 }
268
269 static u32
270 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271 {
272         u32 val;
273
274         spin_lock_bh(&bp->indirect_lock);
275         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
276         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
277         spin_unlock_bh(&bp->indirect_lock);
278         return val;
279 }
280
281 static void
282 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
283 {
284         spin_lock_bh(&bp->indirect_lock);
285         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
286         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
287         spin_unlock_bh(&bp->indirect_lock);
288 }
289
290 static void
291 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
292 {
293         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
294 }
295
296 static u32
297 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298 {
299         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
300 }
301
302 static void
303 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
304 {
305         offset += cid_addr;
306         spin_lock_bh(&bp->indirect_lock);
307         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
308                 int i;
309
310                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
311                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
312                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
313                 for (i = 0; i < 5; i++) {
314                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
315                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
316                                 break;
317                         udelay(5);
318                 }
319         } else {
320                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
321                 REG_WR(bp, BNX2_CTX_DATA, val);
322         }
323         spin_unlock_bh(&bp->indirect_lock);
324 }
325
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329 {
330         struct bnx2 *bp = netdev_priv(dev);
331         struct drv_ctl_io *io = &info->data.io;
332
333         switch (info->cmd) {
334         case DRV_CTL_IO_WR_CMD:
335                 bnx2_reg_wr_ind(bp, io->offset, io->data);
336                 break;
337         case DRV_CTL_IO_RD_CMD:
338                 io->data = bnx2_reg_rd_ind(bp, io->offset);
339                 break;
340         case DRV_CTL_CTX_WR_CMD:
341                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342                 break;
343         default:
344                 return -EINVAL;
345         }
346         return 0;
347 }
348
/* Populate the cnic_eth_dev IRQ info for the CNIC driver.  With MSI-X the
 * CNIC gets its own dedicated vector (the one past the net driver's
 * bp->irq_nvecs); otherwise it shares vector 0 and the main NAPI poll
 * forwards events via cnic_present/cnic_tag.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		/* CNIC has its own vector; no polling from bnapi[0]. */
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		/* Shared interrupt: seed cnic_tag so CNIC only sees new
		 * status-block updates from this point on.
		 */
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Point CNIC at the per-vector slice of the shared status block. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
375
376 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
377                               void *data)
378 {
379         struct bnx2 *bp = netdev_priv(dev);
380         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
381
382         if (ops == NULL)
383                 return -EINVAL;
384
385         if (cp->drv_state & CNIC_DRV_STATE_REGD)
386                 return -EBUSY;
387
388         bp->cnic_data = data;
389         rcu_assign_pointer(bp->cnic_ops, ops);
390
391         cp->num_irq = 0;
392         cp->drv_state = CNIC_DRV_STATE_REGD;
393
394         bnx2_setup_cnic_irq_info(bp);
395
396         return 0;
397 }
398
/* Unregister the CNIC driver.  Clears the registration state under
 * cnic_lock, then waits for any in-flight RCU readers of cnic_ops to
 * finish before returning.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	/* Ensure no CPU still dereferences the old cnic_ops. */
	synchronize_rcu();
	return 0;
}
413
414 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
415 {
416         struct bnx2 *bp = netdev_priv(dev);
417         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
418
419         cp->drv_owner = THIS_MODULE;
420         cp->chip_id = bp->chip_id;
421         cp->pdev = bp->pdev;
422         cp->io_base = bp->regview;
423         cp->drv_ctl = bnx2_drv_ctl;
424         cp->drv_register_cnic = bnx2_register_cnic;
425         cp->drv_unregister_cnic = bnx2_unregister_cnic;
426
427         return cp;
428 }
429 EXPORT_SYMBOL(bnx2_cnic_probe);
430
431 static void
432 bnx2_cnic_stop(struct bnx2 *bp)
433 {
434         struct cnic_ops *c_ops;
435         struct cnic_ctl_info info;
436
437         mutex_lock(&bp->cnic_lock);
438         c_ops = bp->cnic_ops;
439         if (c_ops) {
440                 info.cmd = CNIC_CTL_STOP_CMD;
441                 c_ops->cnic_ctl(bp->cnic_data, &info);
442         }
443         mutex_unlock(&bp->cnic_lock);
444 }
445
446 static void
447 bnx2_cnic_start(struct bnx2 *bp)
448 {
449         struct cnic_ops *c_ops;
450         struct cnic_ctl_info info;
451
452         mutex_lock(&bp->cnic_lock);
453         c_ops = bp->cnic_ops;
454         if (c_ops) {
455                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
456                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
457
458                         bnapi->cnic_tag = bnapi->last_status_idx;
459                 }
460                 info.cmd = CNIC_CTL_START_CMD;
461                 c_ops->cnic_ctl(bp->cnic_data, &info);
462         }
463         mutex_unlock(&bp->cnic_lock);
464 }
465
466 #else
467
/* CNIC support not compiled in: no-op stubs keep callers unconditional. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
477
478 #endif
479
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the EMAC is auto-polling the PHY, polling must be suspended first
 * (and restored afterwards) so the manual MDIO transaction does not
 * collide with hardware-initiated ones.  The transaction is started via
 * BNX2_EMAC_MDIO_COMM and polled up to 50 x 10us for completion.
 *
 * Returns 0 on success, -EBUSY if the transaction never completed (in
 * which case *@val is set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to flush the write before the delay. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore hardware auto-polling if we suspended it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
536
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): hardware auto-polling is suspended around the
 * manual transaction and restored afterwards; completion is polled up to
 * 50 x 10us.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to flush the write before the delay. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO write command (data in low 16 bits). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore hardware auto-polling if we suspended it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
585
586 static void
587 bnx2_disable_int(struct bnx2 *bp)
588 {
589         int i;
590         struct bnx2_napi *bnapi;
591
592         for (i = 0; i < bp->irq_nvecs; i++) {
593                 bnapi = &bp->bnx2_napi[i];
594                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
595                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
596         }
597         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
598 }
599
/* Re-enable interrupts on every vector and kick the host coalescing
 * block so any pending status-block update generates an interrupt.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		/* First ack with MASK_INT still set, then a second write
		 * without it to unmask — the two-step sequence is the
		 * documented way to ack up to last_status_idx and re-arm.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	/* Force a coalescing pass so nothing stays latched. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
620
621 static void
622 bnx2_disable_int_sync(struct bnx2 *bp)
623 {
624         int i;
625
626         atomic_inc(&bp->intr_sem);
627         if (!netif_running(bp->dev))
628                 return;
629
630         bnx2_disable_int(bp);
631         for (i = 0; i < bp->irq_nvecs; i++)
632                 synchronize_irq(bp->irq_tbl[i].vector);
633 }
634
635 static void
636 bnx2_napi_disable(struct bnx2 *bp)
637 {
638         int i;
639
640         for (i = 0; i < bp->irq_nvecs; i++)
641                 napi_disable(&bp->bnx2_napi[i].napi);
642 }
643
644 static void
645 bnx2_napi_enable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_enable(&bp->bnx2_napi[i].napi);
651 }
652
/* Quiesce the interface: optionally stop the CNIC client, halt NAPI and
 * the TX queues, then mask and synchronize interrupts.  @stop_cnic is
 * false when the caller will restart quickly and CNIC can keep running.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout: refresh each queue's trans_start so
		 * the watchdog does not fire while TX is deliberately held.
		 */
		for (i = 0; i <  bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	/* Raise intr_sem and wait out in-flight handlers last. */
	bnx2_disable_int_sync(bp);
}
673
674 static void
675 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
676 {
677         if (atomic_dec_and_test(&bp->intr_sem)) {
678                 if (netif_running(bp->dev)) {
679                         netif_tx_wake_all_queues(bp->dev);
680                         bnx2_napi_enable(bp);
681                         bnx2_enable_int(bp);
682                         if (start_cnic)
683                                 bnx2_cnic_start(bp);
684                 }
685         }
686 }
687
688 static void
689 bnx2_free_tx_mem(struct bnx2 *bp)
690 {
691         int i;
692
693         for (i = 0; i < bp->num_tx_rings; i++) {
694                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
695                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
696
697                 if (txr->tx_desc_ring) {
698                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
699                                             txr->tx_desc_ring,
700                                             txr->tx_desc_mapping);
701                         txr->tx_desc_ring = NULL;
702                 }
703                 kfree(txr->tx_buf_ring);
704                 txr->tx_buf_ring = NULL;
705         }
706 }
707
708 static void
709 bnx2_free_rx_mem(struct bnx2 *bp)
710 {
711         int i;
712
713         for (i = 0; i < bp->num_rx_rings; i++) {
714                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
715                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
716                 int j;
717
718                 for (j = 0; j < bp->rx_max_ring; j++) {
719                         if (rxr->rx_desc_ring[j])
720                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
721                                                     rxr->rx_desc_ring[j],
722                                                     rxr->rx_desc_mapping[j]);
723                         rxr->rx_desc_ring[j] = NULL;
724                 }
725                 vfree(rxr->rx_buf_ring);
726                 rxr->rx_buf_ring = NULL;
727
728                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
729                         if (rxr->rx_pg_desc_ring[j])
730                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
731                                                     rxr->rx_pg_desc_ring[j],
732                                                     rxr->rx_pg_desc_mapping[j]);
733                         rxr->rx_pg_desc_ring[j] = NULL;
734                 }
735                 vfree(rxr->rx_pg_ring);
736                 rxr->rx_pg_ring = NULL;
737         }
738 }
739
740 static int
741 bnx2_alloc_tx_mem(struct bnx2 *bp)
742 {
743         int i;
744
745         for (i = 0; i < bp->num_tx_rings; i++) {
746                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
747                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
748
749                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
750                 if (txr->tx_buf_ring == NULL)
751                         return -ENOMEM;
752
753                 txr->tx_desc_ring =
754                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
755                                              &txr->tx_desc_mapping);
756                 if (txr->tx_desc_ring == NULL)
757                         return -ENOMEM;
758         }
759         return 0;
760 }
761
762 static int
763 bnx2_alloc_rx_mem(struct bnx2 *bp)
764 {
765         int i;
766
767         for (i = 0; i < bp->num_rx_rings; i++) {
768                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
769                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
770                 int j;
771
772                 rxr->rx_buf_ring =
773                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
774                 if (rxr->rx_buf_ring == NULL)
775                         return -ENOMEM;
776
777                 memset(rxr->rx_buf_ring, 0,
778                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
779
780                 for (j = 0; j < bp->rx_max_ring; j++) {
781                         rxr->rx_desc_ring[j] =
782                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
783                                                      &rxr->rx_desc_mapping[j]);
784                         if (rxr->rx_desc_ring[j] == NULL)
785                                 return -ENOMEM;
786
787                 }
788
789                 if (bp->rx_pg_ring_size) {
790                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
791                                                   bp->rx_max_pg_ring);
792                         if (rxr->rx_pg_ring == NULL)
793                                 return -ENOMEM;
794
795                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
796                                bp->rx_max_pg_ring);
797                 }
798
799                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
800                         rxr->rx_pg_desc_ring[j] =
801                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
802                                                 &rxr->rx_pg_desc_mapping[j]);
803                         if (rxr->rx_pg_desc_ring[j] == NULL)
804                                 return -ENOMEM;
805
806                 }
807         }
808         return 0;
809 }
810
811 static void
812 bnx2_free_mem(struct bnx2 *bp)
813 {
814         int i;
815         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
816
817         bnx2_free_tx_mem(bp);
818         bnx2_free_rx_mem(bp);
819
820         for (i = 0; i < bp->ctx_pages; i++) {
821                 if (bp->ctx_blk[i]) {
822                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
823                                             bp->ctx_blk[i],
824                                             bp->ctx_blk_mapping[i]);
825                         bp->ctx_blk[i] = NULL;
826                 }
827         }
828         if (bnapi->status_blk.msi) {
829                 pci_free_consistent(bp->pdev, bp->status_stats_size,
830                                     bnapi->status_blk.msi,
831                                     bp->status_blk_mapping);
832                 bnapi->status_blk.msi = NULL;
833                 bp->stats_blk = NULL;
834         }
835 }
836
/* Allocate all per-device DMA memory: a combined status + statistics
 * block, the 5709 on-chip context backing pages, and the RX/TX rings.
 * Returns 0 or -ENOMEM; on any failure everything already allocated is
 * released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	/* With MSI-X capability, reserve one aligned slice per HW vector. */
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base of the block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors each get their own aligned slice. */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector number encoded in bits 31:24 for INT_ACK. */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives directly after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host-backed context memory, split
		 * into BCM_PAGE_SIZE chunks (at least one page).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
913
914 static void
915 bnx2_report_fw_link(struct bnx2 *bp)
916 {
917         u32 fw_link_status = 0;
918
919         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
920                 return;
921
922         if (bp->link_up) {
923                 u32 bmsr;
924
925                 switch (bp->line_speed) {
926                 case SPEED_10:
927                         if (bp->duplex == DUPLEX_HALF)
928                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
929                         else
930                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
931                         break;
932                 case SPEED_100:
933                         if (bp->duplex == DUPLEX_HALF)
934                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
935                         else
936                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
937                         break;
938                 case SPEED_1000:
939                         if (bp->duplex == DUPLEX_HALF)
940                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
941                         else
942                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
943                         break;
944                 case SPEED_2500:
945                         if (bp->duplex == DUPLEX_HALF)
946                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
947                         else
948                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
949                         break;
950                 }
951
952                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
953
954                 if (bp->autoneg) {
955                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
956
957                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
958                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
959
960                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
961                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
962                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
963                         else
964                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
965                 }
966         }
967         else
968                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
969
970         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
971 }
972
973 static char *
974 bnx2_xceiver_str(struct bnx2 *bp)
975 {
976         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
977                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
978                  "Copper"));
979 }
980
981 static void
982 bnx2_report_link(struct bnx2 *bp)
983 {
984         if (bp->link_up) {
985                 netif_carrier_on(bp->dev);
986                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
987                             bnx2_xceiver_str(bp),
988                             bp->line_speed,
989                             bp->duplex == DUPLEX_FULL ? "full" : "half");
990
991                 if (bp->flow_ctrl) {
992                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
993                                 pr_cont(", receive ");
994                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
995                                         pr_cont("& transmit ");
996                         }
997                         else {
998                                 pr_cont(", transmit ");
999                         }
1000                         pr_cont("flow control ON");
1001                 }
1002                 pr_cont("\n");
1003         } else {
1004                 netif_carrier_off(bp->dev);
1005                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1006                            bnx2_xceiver_str(bp));
1007         }
1008
1009         bnx2_report_fw_link(bp);
1010 }
1011
1012 static void
1013 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1014 {
1015         u32 local_adv, remote_adv;
1016
1017         bp->flow_ctrl = 0;
1018         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1019                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1020
1021                 if (bp->duplex == DUPLEX_FULL) {
1022                         bp->flow_ctrl = bp->req_flow_ctrl;
1023                 }
1024                 return;
1025         }
1026
1027         if (bp->duplex != DUPLEX_FULL) {
1028                 return;
1029         }
1030
1031         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1032             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1033                 u32 val;
1034
1035                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1036                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1037                         bp->flow_ctrl |= FLOW_CTRL_TX;
1038                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1039                         bp->flow_ctrl |= FLOW_CTRL_RX;
1040                 return;
1041         }
1042
1043         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1044         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1045
1046         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1047                 u32 new_local_adv = 0;
1048                 u32 new_remote_adv = 0;
1049
1050                 if (local_adv & ADVERTISE_1000XPAUSE)
1051                         new_local_adv |= ADVERTISE_PAUSE_CAP;
1052                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1053                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
1054                 if (remote_adv & ADVERTISE_1000XPAUSE)
1055                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
1056                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1057                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1058
1059                 local_adv = new_local_adv;
1060                 remote_adv = new_remote_adv;
1061         }
1062
1063         /* See Table 28B-3 of 802.3ab-1999 spec. */
1064         if (local_adv & ADVERTISE_PAUSE_CAP) {
1065                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1066                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1067                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1068                         }
1069                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1070                                 bp->flow_ctrl = FLOW_CTRL_RX;
1071                         }
1072                 }
1073                 else {
1074                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1075                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1076                         }
1077                 }
1078         }
1079         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1080                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1081                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1082
1083                         bp->flow_ctrl = FLOW_CTRL_TX;
1084                 }
1085         }
1086 }
1087
1088 static int
1089 bnx2_5709s_linkup(struct bnx2 *bp)
1090 {
1091         u32 val, speed;
1092
1093         bp->link_up = 1;
1094
1095         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1096         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1097         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098
1099         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1100                 bp->line_speed = bp->req_line_speed;
1101                 bp->duplex = bp->req_duplex;
1102                 return 0;
1103         }
1104         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1105         switch (speed) {
1106                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1107                         bp->line_speed = SPEED_10;
1108                         break;
1109                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1110                         bp->line_speed = SPEED_100;
1111                         break;
1112                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1113                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1114                         bp->line_speed = SPEED_1000;
1115                         break;
1116                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1117                         bp->line_speed = SPEED_2500;
1118                         break;
1119         }
1120         if (val & MII_BNX2_GP_TOP_AN_FD)
1121                 bp->duplex = DUPLEX_FULL;
1122         else
1123                 bp->duplex = DUPLEX_HALF;
1124         return 0;
1125 }
1126
1127 static int
1128 bnx2_5708s_linkup(struct bnx2 *bp)
1129 {
1130         u32 val;
1131
1132         bp->link_up = 1;
1133         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1134         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1135                 case BCM5708S_1000X_STAT1_SPEED_10:
1136                         bp->line_speed = SPEED_10;
1137                         break;
1138                 case BCM5708S_1000X_STAT1_SPEED_100:
1139                         bp->line_speed = SPEED_100;
1140                         break;
1141                 case BCM5708S_1000X_STAT1_SPEED_1G:
1142                         bp->line_speed = SPEED_1000;
1143                         break;
1144                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1145                         bp->line_speed = SPEED_2500;
1146                         break;
1147         }
1148         if (val & BCM5708S_1000X_STAT1_FD)
1149                 bp->duplex = DUPLEX_FULL;
1150         else
1151                 bp->duplex = DUPLEX_HALF;
1152
1153         return 0;
1154 }
1155
1156 static int
1157 bnx2_5706s_linkup(struct bnx2 *bp)
1158 {
1159         u32 bmcr, local_adv, remote_adv, common;
1160
1161         bp->link_up = 1;
1162         bp->line_speed = SPEED_1000;
1163
1164         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1165         if (bmcr & BMCR_FULLDPLX) {
1166                 bp->duplex = DUPLEX_FULL;
1167         }
1168         else {
1169                 bp->duplex = DUPLEX_HALF;
1170         }
1171
1172         if (!(bmcr & BMCR_ANENABLE)) {
1173                 return 0;
1174         }
1175
1176         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1177         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1178
1179         common = local_adv & remote_adv;
1180         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1181
1182                 if (common & ADVERTISE_1000XFULL) {
1183                         bp->duplex = DUPLEX_FULL;
1184                 }
1185                 else {
1186                         bp->duplex = DUPLEX_HALF;
1187                 }
1188         }
1189
1190         return 0;
1191 }
1192
1193 static int
1194 bnx2_copper_linkup(struct bnx2 *bp)
1195 {
1196         u32 bmcr;
1197
1198         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1199         if (bmcr & BMCR_ANENABLE) {
1200                 u32 local_adv, remote_adv, common;
1201
1202                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1203                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1204
1205                 common = local_adv & (remote_adv >> 2);
1206                 if (common & ADVERTISE_1000FULL) {
1207                         bp->line_speed = SPEED_1000;
1208                         bp->duplex = DUPLEX_FULL;
1209                 }
1210                 else if (common & ADVERTISE_1000HALF) {
1211                         bp->line_speed = SPEED_1000;
1212                         bp->duplex = DUPLEX_HALF;
1213                 }
1214                 else {
1215                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1216                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1217
1218                         common = local_adv & remote_adv;
1219                         if (common & ADVERTISE_100FULL) {
1220                                 bp->line_speed = SPEED_100;
1221                                 bp->duplex = DUPLEX_FULL;
1222                         }
1223                         else if (common & ADVERTISE_100HALF) {
1224                                 bp->line_speed = SPEED_100;
1225                                 bp->duplex = DUPLEX_HALF;
1226                         }
1227                         else if (common & ADVERTISE_10FULL) {
1228                                 bp->line_speed = SPEED_10;
1229                                 bp->duplex = DUPLEX_FULL;
1230                         }
1231                         else if (common & ADVERTISE_10HALF) {
1232                                 bp->line_speed = SPEED_10;
1233                                 bp->duplex = DUPLEX_HALF;
1234                         }
1235                         else {
1236                                 bp->line_speed = 0;
1237                                 bp->link_up = 0;
1238                         }
1239                 }
1240         }
1241         else {
1242                 if (bmcr & BMCR_SPEED100) {
1243                         bp->line_speed = SPEED_100;
1244                 }
1245                 else {
1246                         bp->line_speed = SPEED_10;
1247                 }
1248                 if (bmcr & BMCR_FULLDPLX) {
1249                         bp->duplex = DUPLEX_FULL;
1250                 }
1251                 else {
1252                         bp->duplex = DUPLEX_HALF;
1253                 }
1254         }
1255
1256         return 0;
1257 }
1258
/* Program the L2 rx context for the given connection ID, including the
 * 5709's pause watermarks which throttle the link partner based on rx
 * ring occupancy.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark enables pause generation only when tx
		 * flow control was negotiated; otherwise disable it.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* Watermarks must fit inside the ring. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Convert byte-style counts to the hardware's scaled
		 * field units.
		 */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* High watermark is a 4-bit field; clamp, and if it
		 * scaled down to zero, disable the low mark too.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1294
1295 static void
1296 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1297 {
1298         int i;
1299         u32 cid;
1300
1301         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1302                 if (i == 1)
1303                         cid = RX_RSS_CID;
1304                 bnx2_init_rx_context(bp, cid);
1305         }
1306 }
1307
1308 static void
1309 bnx2_set_mac_link(struct bnx2 *bp)
1310 {
1311         u32 val;
1312
1313         REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1314         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1315                 (bp->duplex == DUPLEX_HALF)) {
1316                 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1317         }
1318
1319         /* Configure the EMAC mode register. */
1320         val = REG_RD(bp, BNX2_EMAC_MODE);
1321
1322         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1323                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1324                 BNX2_EMAC_MODE_25G_MODE);
1325
1326         if (bp->link_up) {
1327                 switch (bp->line_speed) {
1328                         case SPEED_10:
1329                                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1330                                         val |= BNX2_EMAC_MODE_PORT_MII_10M;
1331                                         break;
1332                                 }
1333                                 /* fall through */
1334                         case SPEED_100:
1335                                 val |= BNX2_EMAC_MODE_PORT_MII;
1336                                 break;
1337                         case SPEED_2500:
1338                                 val |= BNX2_EMAC_MODE_25G_MODE;
1339                                 /* fall through */
1340                         case SPEED_1000:
1341                                 val |= BNX2_EMAC_MODE_PORT_GMII;
1342                                 break;
1343                 }
1344         }
1345         else {
1346                 val |= BNX2_EMAC_MODE_PORT_GMII;
1347         }
1348
1349         /* Set the MAC to operate in the appropriate duplex mode. */
1350         if (bp->duplex == DUPLEX_HALF)
1351                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1352         REG_WR(bp, BNX2_EMAC_MODE, val);
1353
1354         /* Enable/disable rx PAUSE. */
1355         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1356
1357         if (bp->flow_ctrl & FLOW_CTRL_RX)
1358                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1359         REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1360
1361         /* Enable/disable tx PAUSE. */
1362         val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1363         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1364
1365         if (bp->flow_ctrl & FLOW_CTRL_TX)
1366                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1367         REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1368
1369         /* Acknowledge the interrupt. */
1370         REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1371
1372         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1373                 bnx2_init_all_rx_contexts(bp);
1374 }
1375
1376 static void
1377 bnx2_enable_bmsr1(struct bnx2 *bp)
1378 {
1379         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1380             (CHIP_NUM(bp) == CHIP_NUM_5709))
1381                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1382                                MII_BNX2_BLK_ADDR_GP_STATUS);
1383 }
1384
1385 static void
1386 bnx2_disable_bmsr1(struct bnx2 *bp)
1387 {
1388         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1389             (CHIP_NUM(bp) == CHIP_NUM_5709))
1390                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1391                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1392 }
1393
1394 static int
1395 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1396 {
1397         u32 up1;
1398         int ret = 1;
1399
1400         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1401                 return 0;
1402
1403         if (bp->autoneg & AUTONEG_SPEED)
1404                 bp->advertising |= ADVERTISED_2500baseX_Full;
1405
1406         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1407                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1408
1409         bnx2_read_phy(bp, bp->mii_up1, &up1);
1410         if (!(up1 & BCM5708S_UP1_2G5)) {
1411                 up1 |= BCM5708S_UP1_2G5;
1412                 bnx2_write_phy(bp, bp->mii_up1, up1);
1413                 ret = 0;
1414         }
1415
1416         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1417                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1418                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1419
1420         return ret;
1421 }
1422
1423 static int
1424 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1425 {
1426         u32 up1;
1427         int ret = 0;
1428
1429         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1430                 return 0;
1431
1432         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1433                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1434
1435         bnx2_read_phy(bp, bp->mii_up1, &up1);
1436         if (up1 & BCM5708S_UP1_2G5) {
1437                 up1 &= ~BCM5708S_UP1_2G5;
1438                 bnx2_write_phy(bp, bp->mii_up1, up1);
1439                 ret = 1;
1440         }
1441
1442         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1443                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1444                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1445
1446         return ret;
1447 }
1448
/* Force the SerDes PHY to 2.5G operation.  Chip-specific: the 5709
 * uses the SERDES_DIG MISC1 register, the 5708 a BMCR bit; other
 * chips are left untouched.  When speed autoneg was requested,
 * autoneg is turned off in BMCR since forcing implies a fixed speed.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* MISC1 lives in the SERDES_DIG block; switch in,
		 * set the force-2.5G bits, then restore the default
		 * block before touching BMCR.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		/* 5708 forces 2.5G via a vendor BMCR bit. */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1485
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific force-2.5G
 * setting and, if speed autoneg was requested, re-enable and restart
 * autoneg at gigabit in BMCR.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in SERDES_DIG MISC1, then
		 * restore the default register block before touching
		 * BMCR.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1518
1519 static void
1520 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1521 {
1522         u32 val;
1523
1524         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1525         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1526         if (start)
1527                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1528         else
1529                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1530 }
1531
/* Poll the PHY, update bp->link_up / speed / duplex / flow control,
 * report any state change, and reprogram the MAC accordingly.
 * Always returns 0.  Skipped when in loopback (link forced up) or
 * when the firmware owns the PHY.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	/* Remember the previous state to detect transitions below. */
	link_up = bp->link_up;

	/* BMSR latches link loss; read twice for the current state.
	 * On 5709 SerDes the register block must be switched first.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release any previously forced link-down state. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN debug shadow register also latches; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Override BMSR link bit: trust the EMAC link status
		 * combined with the NOSYNC indication instead.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting so autoneg
		 * can run again when the link returns.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode by re-enabling autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log and notify firmware on an actual state change. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1615
1616 static int
1617 bnx2_reset_phy(struct bnx2 *bp)
1618 {
1619         int i;
1620         u32 reg;
1621
1622         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1623
1624 #define PHY_RESET_MAX_WAIT 100
1625         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1626                 udelay(10);
1627
1628                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1629                 if (!(reg & BMCR_RESET)) {
1630                         udelay(20);
1631                         break;
1632                 }
1633         }
1634         if (i == PHY_RESET_MAX_WAIT) {
1635                 return -EBUSY;
1636         }
1637         return 0;
1638 }
1639
1640 static u32
1641 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1642 {
1643         u32 adv = 0;
1644
1645         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1646                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1647
1648                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1649                         adv = ADVERTISE_1000XPAUSE;
1650                 }
1651                 else {
1652                         adv = ADVERTISE_PAUSE_CAP;
1653                 }
1654         }
1655         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1656                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1657                         adv = ADVERTISE_1000XPSE_ASYM;
1658                 }
1659                 else {
1660                         adv = ADVERTISE_PAUSE_ASYM;
1661                 }
1662         }
1663         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1664                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1665                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1666                 }
1667                 else {
1668                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1669                 }
1670         }
1671         return adv;
1672 }
1673
1674 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1675
/* Configure link parameters when the firmware owns the PHY: encode the
 * requested speed/duplex/pause settings into a netlink-style argument
 * word, write it to shared memory, and ask the firmware to apply it
 * via a SET_LINK command.  Caller holds phy_lock; it is dropped around
 * the firmware handshake (see the sparse annotations).  Returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled mode. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: exactly one speed/duplex combination.
		 * 1G and 2.5G are full duplex only.
		 */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Map pause advertisement (either encoding) onto the firmware's
	 * symmetric/asymmetric pause flags.
	 */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep/poll; drop phy_lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1734
/* Configure the SerDes PHY from bp->autoneg / bp->req_* settings, either
 * forcing a speed/duplex or (re)starting autonegotiation.  When the
 * configuration actually changes while the link is up, the link is first
 * forced down so the partner notices the change.  phy_lock may be dropped
 * briefly to let the forced link-down settle.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Remote-PHY-capable devices delegate PHY setup to the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on/off requires a link
		 * bounce even if BMCR ends up unchanged. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the forced 2.5G mode. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is an undocumented
				 * speed-select bit on the 5709 PHY; cleared
				 * here for forced 1G — confirm vs PHY docs. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Withdraw all 1000X advertisement and
				 * restart autoneg so the partner drops the
				 * link, then apply the forced settings. */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just resync flow control/MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; release phy_lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		/* Advertisement unchanged and autoneg already on. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1851
/* All fibre speeds this device can advertise (ethtool ADVERTISED_* bits);
 * 2.5G is included only on 2.5G-capable parts.  The expansion is fully
 * parenthesized: previously the bare conditional expression made
 * "ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg" parse as
 * "cond ? (2g5|1g) : (1g | ADVERTISED_Autoneg)" because ?: binds lower
 * than |, silently dropping the Autoneg bit on 2.5G-capable devices.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))

/* All copper speeds advertised by default (ethtool ADVERTISED_* bits). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits covering every 10/100 mode. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits covering both gigabit modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1866
1867 static void
1868 bnx2_set_default_remote_link(struct bnx2 *bp)
1869 {
1870         u32 link;
1871
1872         if (bp->phy_port == PORT_TP)
1873                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1874         else
1875                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1876
1877         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1878                 bp->req_line_speed = 0;
1879                 bp->autoneg |= AUTONEG_SPEED;
1880                 bp->advertising = ADVERTISED_Autoneg;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1882                         bp->advertising |= ADVERTISED_10baseT_Half;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1884                         bp->advertising |= ADVERTISED_10baseT_Full;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1886                         bp->advertising |= ADVERTISED_100baseT_Half;
1887                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1888                         bp->advertising |= ADVERTISED_100baseT_Full;
1889                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1890                         bp->advertising |= ADVERTISED_1000baseT_Full;
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1892                         bp->advertising |= ADVERTISED_2500baseX_Full;
1893         } else {
1894                 bp->autoneg = 0;
1895                 bp->advertising = 0;
1896                 bp->req_duplex = DUPLEX_FULL;
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1898                         bp->req_line_speed = SPEED_10;
1899                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1900                                 bp->req_duplex = DUPLEX_HALF;
1901                 }
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1903                         bp->req_line_speed = SPEED_100;
1904                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905                                 bp->req_duplex = DUPLEX_HALF;
1906                 }
1907                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1908                         bp->req_line_speed = SPEED_1000;
1909                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1910                         bp->req_line_speed = SPEED_2500;
1911         }
1912 }
1913
1914 static void
1915 bnx2_set_default_link(struct bnx2 *bp)
1916 {
1917         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1918                 bnx2_set_default_remote_link(bp);
1919                 return;
1920         }
1921
1922         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1923         bp->req_line_speed = 0;
1924         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1925                 u32 reg;
1926
1927                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1928
1929                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1930                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1931                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1932                         bp->autoneg = 0;
1933                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1934                         bp->req_duplex = DUPLEX_FULL;
1935                 }
1936         } else
1937                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1938 }
1939
/* Advance the driver pulse sequence number and write it to the firmware
 * pulse mailbox in shared memory.  The write goes through the PCI config
 * register window (address register, then data register); indirect_lock
 * keeps that two-step access atomic with respect to other users of the
 * window.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	/* Only the low sequence bits form the pulse message. */
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1953
/* Process a link-status event posted by the remote-PHY firmware.  Decodes
 * the BNX2_LINK_STATUS shared-memory word into bp->link_up, line_speed,
 * duplex, flow_ctrl and phy_port, reports carrier changes, and reprograms
 * the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* old state, for change detection */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this bit when it wants a fresh driver pulse. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Half-duplex cases set the duplex and then deliberately
		 * fall through to the full-duplex case for the speed. */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		/* Use the forced flow-control setting unless both speed and
		 * flow-control autonegotiation are enabled. */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Media type changed: re-derive the default link config. */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2030
2031 static int
2032 bnx2_set_remote_link(struct bnx2 *bp)
2033 {
2034         u32 evt_code;
2035
2036         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2037         switch (evt_code) {
2038                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2039                         bnx2_remote_phy_event(bp);
2040                         break;
2041                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2042                 default:
2043                         bnx2_send_heart_beat(bp);
2044                         break;
2045         }
2046         return 0;
2047 }
2048
/* Configure the copper PHY from bp->autoneg / bp->req_* settings.  In
 * autoneg mode the advertisement registers are rewritten and autoneg
 * restarted only if something actually changed; in forced mode the link
 * is bounced if BMCR needs to change.  phy_lock is dropped briefly while
 * waiting for a forced link-down to settle.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits this driver manages from the current
		 * advertisement; the rest is rebuilt below. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if the advertisement changed or
		 * autoneg is not currently enabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path (10/100 only on copper here). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; drop phy_lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2147
2148 static int
2149 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2150 __releases(&bp->phy_lock)
2151 __acquires(&bp->phy_lock)
2152 {
2153         if (bp->loopback == MAC_LOOPBACK)
2154                 return 0;
2155
2156         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2157                 return (bnx2_setup_serdes_phy(bp, port));
2158         }
2159         else {
2160                 return (bnx2_setup_copper_phy(bp));
2161         }
2162 }
2163
/* Initialize the 5709 SerDes PHY.  Its registers live in banks selected
 * via MII_BNX2_BLK_ADDR; each group of writes below first selects the
 * bank it touches, and the default COMBO_IEEEB0 bank is re-selected on
 * exit.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* The IEEE-standard MII registers sit at a 0x10 offset on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only on 2.5G-capable devices. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page modes. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Clause 73 BAM autoneg configuration. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the default IEEE bank selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2213
/* Initialize the 5708 SerDes PHY: select fiber mode with auto-detection,
 * enable PLL early detection, optionally advertise 2.5G, and apply
 * board/revision-specific TX amplitude tuning.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G where supported. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may carry a board-specific TX control value; apply it on
	 * backplane designs only. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2271
/* Initialize the 5706 SerDes PHY, adjusting packet-length handling for
 * jumbo vs standard MTU.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		/* NOTE(review): 0x18/0x1c are vendor shadow registers; the
		 * masks/values below are Broadcom-provided magic. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2309
/* Initialize the integrated copper PHY: apply errata workarounds, adjust
 * packet-length handling for the current MTU, and enable
 * ethernet@wirespeed.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* DSP patch for the CRC erratum.  NOTE(review): register
		 * 0x17 is the DSP address and 0x15 the DSP data port; the
		 * values are Broadcom-provided magic. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expansion register 8 to disable
		 * early DAC. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2361
2362
2363 static int
2364 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2365 __releases(&bp->phy_lock)
2366 __acquires(&bp->phy_lock)
2367 {
2368         u32 val;
2369         int rc = 0;
2370
2371         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2372         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2373
2374         bp->mii_bmcr = MII_BMCR;
2375         bp->mii_bmsr = MII_BMSR;
2376         bp->mii_bmsr1 = MII_BMSR;
2377         bp->mii_adv = MII_ADVERTISE;
2378         bp->mii_lpa = MII_LPA;
2379
2380         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2381
2382         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2383                 goto setup_phy;
2384
2385         bnx2_read_phy(bp, MII_PHYSID1, &val);
2386         bp->phy_id = val << 16;
2387         bnx2_read_phy(bp, MII_PHYSID2, &val);
2388         bp->phy_id |= val & 0xffff;
2389
2390         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2391                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2392                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2393                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2394                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2395                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2396                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2397         }
2398         else {
2399                 rc = bnx2_init_copper_phy(bp, reset_phy);
2400         }
2401
2402 setup_phy:
2403         if (!rc)
2404                 rc = bnx2_setup_phy(bp, bp->phy_port);
2405
2406         return rc;
2407 }
2408
2409 static int
2410 bnx2_set_mac_loopback(struct bnx2 *bp)
2411 {
2412         u32 mac_mode;
2413
2414         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2415         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2416         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2417         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2418         bp->link_up = 1;
2419         return 0;
2420 }
2421
2422 static int bnx2_test_link(struct bnx2 *);
2423
2424 static int
2425 bnx2_set_phy_loopback(struct bnx2 *bp)
2426 {
2427         u32 mac_mode;
2428         int rc, i;
2429
2430         spin_lock_bh(&bp->phy_lock);
2431         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2432                             BMCR_SPEED1000);
2433         spin_unlock_bh(&bp->phy_lock);
2434         if (rc)
2435                 return rc;
2436
2437         for (i = 0; i < 10; i++) {
2438                 if (bnx2_test_link(bp) == 0)
2439                         break;
2440                 msleep(100);
2441         }
2442
2443         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2444         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2445                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2446                       BNX2_EMAC_MODE_25G_MODE);
2447
2448         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2449         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2450         bp->link_up = 1;
2451         return 0;
2452 }
2453
/* Post @msg_data (a BNX2_DRV_MSG_* code) to the firmware mailbox, tagged
 * with an incrementing sequence number, and optionally wait for the ACK.
 *
 * @ack:    when zero, post the message and return 0 immediately.
 * @silent: suppress the error printout on ACK timeout.
 *
 * Returns 0 on success (and always for WAIT0-type messages once posted),
 * -EBUSY if the firmware did not acknowledge within
 * BNX2_FW_ACK_TIME_OUT_MS, or -EIO if it acknowledged with a non-OK
 * status.  Sleeps (msleep), so it must not be called in atomic context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next driver sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* The firmware echoes the sequence number in its ACK. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are considered delivered regardless of the ACK. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2498
/* Initialize the 5709 context memory, which is paged through host
 * memory.  Zeroes each host context page and programs its DMA address
 * into the chip's host page table.
 *
 * Returns 0 on success, -EBUSY if the chip fails to complete memory
 * init or a page-table write within the poll window, or -ENOMEM if a
 * context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and start memory init; bits 16+ carry
	 * the host page size (BCM_PAGE_BITS relative to 256 bytes).
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll until the chip clears the MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Load the low/high halves of the page DMA address, issue
		 * the page-table write request, then poll for completion.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2546
/* Zero the on-chip context memory for all 96 contexts on chips that do
 * not page context through host memory.  On 5706 A0, certain context
 * IDs are remapped to avoid bad on-chip memory regions.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 workaround: context IDs with bit 3 set
			 * are redirected into the 0x60+ range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Map the virtual context window onto the physical
			 * page before writing through it.
			 */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2589
/* Hardware workaround: the rx mbuf pool may contain bad memory blocks
 * (buffer addresses with bit 9 set).  Drain the entire free pool,
 * remembering the good buffers, then free only the good ones back --
 * deliberately leaking the bad ones so the chip never hands them out.
 *
 * Returns 0 on success or -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 slots bound the number of mbufs the pool can hold. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the buffer value for the FW_BUF_FREE register. */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2640
2641 static void
2642 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2643 {
2644         u32 val;
2645
2646         val = (mac_addr[0] << 8) | mac_addr[1];
2647
2648         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2649
2650         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2651                 (mac_addr[4] << 8) | mac_addr[5];
2652
2653         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2654 }
2655
/* Allocate and DMA-map a page for slot @index of the rx page ring and
 * publish its DMA address in the matching buffer descriptor.
 *
 * Returns 0 on success, -ENOMEM if page allocation fails, or -EIO if
 * the DMA mapping fails (the page is freed before returning).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	/* The BD holds the 64-bit DMA address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2680
2681 static void
2682 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2683 {
2684         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2685         struct page *page = rx_pg->page;
2686
2687         if (!page)
2688                 return;
2689
2690         pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2691                        PCI_DMA_FROMDEVICE);
2692
2693         __free_page(page);
2694         rx_pg->page = NULL;
2695 }
2696
/* Allocate, align, and DMA-map a fresh rx skb for slot @index, publish
 * its DMA address in the buffer descriptor, and advance the producer
 * byte sequence counter.
 *
 * Returns 0 on success, -ENOMEM if skb allocation fails, or -EIO if
 * the DMA mapping fails (the skb is freed before returning).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Round skb->data up to a BNX2_RX_ALIGN boundary for DMA. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	/* Record where the l2_fhdr will land so it can be prefetched. */
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2732
2733 static int
2734 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2735 {
2736         struct status_block *sblk = bnapi->status_blk.msi;
2737         u32 new_link_state, old_link_state;
2738         int is_set = 1;
2739
2740         new_link_state = sblk->status_attn_bits & event;
2741         old_link_state = sblk->status_attn_bits_ack & event;
2742         if (new_link_state != old_link_state) {
2743                 if (new_link_state)
2744                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2745                 else
2746                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2747         } else
2748                 is_set = 0;
2749
2750         return is_set;
2751 }
2752
/* Handle PHY-related attention events: a link-state change updates the
 * local link via bnx2_set_link(); a timer-abort event updates the
 * remote link via bnx2_set_remote_link().  Both are serialized under
 * phy_lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2766
2767 static inline u16
2768 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2769 {
2770         u16 cons;
2771
2772         /* Tell compiler that status block fields can change. */
2773         barrier();
2774         cons = *bnapi->hw_tx_cons_ptr;
2775         barrier();
2776         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2777                 cons++;
2778         return cons;
2779 }
2780
/* Reclaim completed tx buffers for this NAPI instance's tx ring, up to
 * @budget packets.  Unmaps and frees each completed skb, then wakes
 * the tx queue if it was stopped and enough descriptors are free.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance services the tx queue of its index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Wait until every BD of the TSO packet, including
			 * the final one, is past the hardware consumer.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment mapped in the following BDs. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up any completions that arrived while reclaiming. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to close the race with a
		 * concurrent queue stop.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2871
/* Recycle @count rx pages from the page-ring consumer side back to the
 * producer side after an allocation failure, so every producer slot
 * keeps a mapped page.
 *
 * @skb: when non-NULL, a partially-built skb whose last frag page must
 *       first be reclaimed into the consumer slot before the skb is
 *       freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page, its DMA mapping, and the BD address from
		 * the consumer slot to the producer slot.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2927
/* Recycle an rx skb from the consumer slot back into the producer slot
 * (used when a replacement buffer could not be allocated, or after the
 * packet data was copied out), carrying the DMA mapping and BD address
 * along with it.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand back to the device the header region that was synced to
	 * the CPU (BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH bytes).
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	/* Same slot: mapping and BD address are already in place. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2958
/* Finish receiving a packet into @skb.  A replacement skb is allocated
 * for the ring slot first; on failure, the original buffer (and any
 * pages it would have used) is recycled and the packet dropped.  For
 * split/jumbo packets (@hdr_len != 0) the payload beyond the header is
 * attached from the page ring as skb frags.
 *
 * @len:      packet length excluding the 4-byte FCS
 * @hdr_len:  bytes of header in the linear buffer for split packets,
 *            or 0 when the whole packet fits in the linear buffer
 * @dma_addr: DMA address of the linear rx buffer
 * @ring_idx: consumer index in the high 16 bits, producer in the low
 *
 * Returns 0 on success or a negative errno if the packet was dropped.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Also recycle the pages that would have carried
			 * the non-header part (+4 accounts for the FCS).
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size still includes the FCS; it is trimmed from
		 * the final fragment below so it never reaches the stack.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only FCS bytes remain: trim them from the
				 * skb and recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3057
3058 static inline u16
3059 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3060 {
3061         u16 cons;
3062
3063         /* Tell compiler that status block fields can change. */
3064         barrier();
3065         cons = *bnapi->hw_rx_cons_ptr;
3066         barrier();
3067         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3068                 cons++;
3069         return cons;
3070 }
3071
/* Receive up to @budget packets from this NAPI instance's rx ring.
 * Small packets (<= rx_copy_thresh) are copied into a fresh skb so the
 * large rx buffer can be recycled in place; larger ones go through
 * bnx2_rx_skb().  Also handles error frames, VLAN tag handling, and
 * hardware checksum results.
 *
 * Returns the number of packets received.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;
	struct pci_dev *pdev = bp->pdev;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* Prefetch the next frame header only when the DMA ops
		 * have no sync_single_for_cpu hook (no per-buffer sync is
		 * needed before the data is CPU-visible).
		 */
		if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
			next_rx_buf =
				&rxr->rx_buf_ring[
					RX_RING_IDX(NEXT_RX_BD(sw_cons))];
			prefetch(next_rx_buf->desc);
		}
		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only the l2_fhdr plus the copy-threshold bytes need to
		 * be CPU-visible at this point.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Determine whether the packet spills into the page ring:
		 * either the chip split it, or it exceeds the jumbo
		 * threshold.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop error frames, recycling the buffer and any pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Allocate with 6 bytes of slack for the offset
			 * copy below.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group to hand the tag to: put the
				 * stripped 802.1Q header back into the data.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop frames longer than MTU unless the extra length is
		 * explained by an 802.1Q (0x8100) header.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Accept the hardware checksum only when no TCP/UDP
		 * checksum error was flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&bnapi->napi, skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices and byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3255
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Write the int-ack command with MASK_INT set to disable further
	 * interrupts from the device.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3278
/* One-shot MSI ISR.  Unlike bnx2_msi(), no ack/mask register write is
 * done here; NOTE(review): presumably one-shot mode masks the
 * interrupt in hardware until re-armed -- confirm against the chip's
 * one-shot MSI semantics.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3295
/* INTx ISR (used when the line may be shared).  Verifies the interrupt
 * belongs to this device before claiming it, acks/masks the interrupt,
 * and schedules NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the latest status index before scheduling the poll. */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3334
3335 static inline int
3336 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3337 {
3338         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3339         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3340
3341         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3342             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3343                 return 1;
3344         return 0;
3345 }
3346
/* Attention bits that require slow-path handling (link state changes
 * and timer aborts).
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
3349
3350 static inline int
3351 bnx2_has_work(struct bnx2_napi *bnapi)
3352 {
3353         struct status_block *sblk = bnapi->status_blk.msi;
3354
3355         if (bnx2_has_fast_work(bnapi))
3356                 return 1;
3357
3358 #ifdef BCM_CNIC
3359         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3360                 return 1;
3361 #endif
3362
3363         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3364             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3365                 return 1;
3366
3367         return 0;
3368 }
3369
/* Workaround for lost MSI: called periodically to detect the case where
 * work is pending but no MSI arrived.  If the status index has not moved
 * since the previous idle check, the MSI is presumed lost; pulsing the
 * MSI enable bit and invoking the handler directly recovers the queue.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do unless MSI is actually in use. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off and back on, then service
			 * the interrupt by hand.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3391
#ifdef BCM_CNIC
/* Forward the status block to the registered CNIC (offload) driver, if
 * any.  cnic_ops is RCU-protected; the handler's return value is stored
 * as cnic_tag and compared against status_idx in bnx2_has_work().
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3408
/* Service slow-path attention events (link state / timer abort).  An
 * event is pending when the raised and acknowledged attention bits
 * disagree for any STATUS_ATTN_EVENTS bit.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */
	}
}
3428
3429 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3430                           int work_done, int budget)
3431 {
3432         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3433         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3434
3435         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3436                 bnx2_tx_int(bp, bnapi, 0);
3437
3438         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3439                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3440
3441         return work_done;
3442 }
3443
/* NAPI poll handler for MSI-X vectors.  Handles only fast-path rx/tx
 * work; link and CNIC events are serviced by the base vector's handler.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: leave polling and re-enable this vector's
			 * interrupt, acknowledging up to last_status_idx.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3470
/* NAPI poll handler for the base (INTx/MSI) vector.  Unlike the MSI-X
 * handler, this one also services link attention events and forwards
 * the status block to the CNIC driver.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: write twice — first update the index with
			 * the interrupt still masked, then write again
			 * without the mask bit to re-enable it.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3519
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC receive mode (promiscuous / VLAN tag keeping) and
 * the RPM sort-user0 filters (broadcast, multicast hash, unicast match)
 * from the net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock also serializes access to the rx-mode registers. */
	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only while no vlan group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash table bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(ha, dev) {
			/* Hash each address into one of 256 bits: the low
			 * CRC byte selects the register (top 3 bits) and
			 * bit position (low 5 bits).
			 */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for perfect filtering; fall back to
	 * promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, program the new mode, then re-enable it. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3612
3613 static int __devinit
3614 check_fw_section(const struct firmware *fw,
3615                  const struct bnx2_fw_file_section *section,
3616                  u32 alignment, bool non_empty)
3617 {
3618         u32 offset = be32_to_cpu(section->offset);
3619         u32 len = be32_to_cpu(section->len);
3620
3621         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3622                 return -EINVAL;
3623         if ((non_empty && len == 0) || len > fw->size - offset ||
3624             len & (alignment - 1))
3625                 return -EINVAL;
3626         return 0;
3627 }
3628
3629 static int __devinit
3630 check_mips_fw_entry(const struct firmware *fw,
3631                     const struct bnx2_mips_fw_file_entry *entry)
3632 {
3633         if (check_fw_section(fw, &entry->text, 4, true) ||
3634             check_fw_section(fw, &entry->data, 4, false) ||
3635             check_fw_section(fw, &entry->rodata, 4, false))
3636                 return -EINVAL;
3637         return 0;
3638 }
3639
3640 static int __devinit
3641 bnx2_request_firmware(struct bnx2 *bp)
3642 {
3643         const char *mips_fw_file, *rv2p_fw_file;
3644         const struct bnx2_mips_fw_file *mips_fw;
3645         const struct bnx2_rv2p_fw_file *rv2p_fw;
3646         int rc;
3647
3648         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3649                 mips_fw_file = FW_MIPS_FILE_09;
3650                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3651                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3652                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3653                 else
3654                         rv2p_fw_file = FW_RV2P_FILE_09;
3655         } else {
3656                 mips_fw_file = FW_MIPS_FILE_06;
3657                 rv2p_fw_file = FW_RV2P_FILE_06;
3658         }
3659
3660         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3661         if (rc) {
3662                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3663                 return rc;
3664         }
3665
3666         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3667         if (rc) {
3668                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3669                 return rc;
3670         }
3671         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3672         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3673         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3674             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3675             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3676             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3677             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3678             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3679                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3680                 return -EINVAL;
3681         }
3682         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3683             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3684             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3685                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3686                 return -EINVAL;
3687         }
3688
3689         return 0;
3690 }
3691
3692 static u32
3693 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3694 {
3695         switch (idx) {
3696         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3697                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3698                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3699                 break;
3700         }
3701         return rv2p_code;
3702 }
3703
/* Download one RV2P firmware image into the chip.  Instructions are
 * 64 bits wide: each is written as two 32-bit words through the
 * INSTR_HIGH/INSTR_LOW registers and committed to the instruction RAM
 * slot via the processor's address/command register.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each RV2P processor has its own address/command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the whole image, one 8-byte instruction per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply up to 8 fixups listed in the firmware file: rewrite a
	 * single instruction in place (e.g. to patch in the BD page size).
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3763
3764 static int
3765 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3766             const struct bnx2_mips_fw_file_entry *fw_entry)
3767 {
3768         u32 addr, len, file_offset;
3769         __be32 *data;
3770         u32 offset;
3771         u32 val;
3772
3773         /* Halt the CPU. */
3774         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3775         val |= cpu_reg->mode_value_halt;
3776         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3777         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3778
3779         /* Load the Text area. */
3780         addr = be32_to_cpu(fw_entry->text.addr);
3781         len = be32_to_cpu(fw_entry->text.len);
3782         file_offset = be32_to_cpu(fw_entry->text.offset);
3783         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3784
3785         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3786         if (len) {
3787                 int j;
3788
3789                 for (j = 0; j < (len / 4); j++, offset += 4)
3790                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3791         }
3792
3793         /* Load the Data area. */
3794         addr = be32_to_cpu(fw_entry->data.addr);
3795         len = be32_to_cpu(fw_entry->data.len);
3796         file_offset = be32_to_cpu(fw_entry->data.offset);
3797         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3798
3799         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3800         if (len) {
3801                 int j;
3802
3803                 for (j = 0; j < (len / 4); j++, offset += 4)
3804                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3805         }
3806
3807         /* Load the Read-Only area. */
3808         addr = be32_to_cpu(fw_entry->rodata.addr);
3809         len = be32_to_cpu(fw_entry->rodata.len);
3810         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3811         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3812
3813         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3814         if (len) {
3815                 int j;
3816
3817                 for (j = 0; j < (len / 4); j++, offset += 4)
3818                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3819         }
3820
3821         /* Clear the pre-fetch instruction. */
3822         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3823
3824         val = be32_to_cpu(fw_entry->start_addr);
3825         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3826
3827         /* Start the CPU. */
3828         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3829         val &= ~cpu_reg->mode_value_halt;
3830         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3831         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3832
3833         return 0;
3834 }
3835
3836 static int
3837 bnx2_init_cpus(struct bnx2 *bp)
3838 {
3839         const struct bnx2_mips_fw_file *mips_fw =
3840                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3841         const struct bnx2_rv2p_fw_file *rv2p_fw =
3842                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3843         int rc;
3844
3845         /* Initialize the RV2P processor. */
3846         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3847         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3848
3849         /* Initialize the RX Processor. */
3850         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3851         if (rc)
3852                 goto init_cpu_err;
3853
3854         /* Initialize the TX Processor. */
3855         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3856         if (rc)
3857                 goto init_cpu_err;
3858
3859         /* Initialize the TX Patch-up Processor. */
3860         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3861         if (rc)
3862                 goto init_cpu_err;
3863
3864         /* Initialize the Completion Processor. */
3865         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3866         if (rc)
3867                 goto init_cpu_err;
3868
3869         /* Initialize the Command Processor. */
3870         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3871
3872 init_cpu_err:
3873         return rc;
3874 }
3875
/* Transition the device between the D0 and D3hot PCI power states.
 *
 * D0: clear any latched PME status, wait out the D3hot exit delay, and
 * disable the magic/ACPI packet matching used for wake-on-LAN.
 *
 * D3hot: when WOL is enabled, renegotiate a low link speed on copper,
 * configure the MAC for magic-packet reception, notify the bootcode,
 * set PME enable and finally write the new power state.  Any other
 * requested state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Enter D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear received magic/ACPI packet indications and stop
		 * matching magic packets.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily advertise only 10/100 on copper to
			 * reduce power while waiting for a wakeup packet;
			 * the original settings are restored below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether wakeup is armed. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot when WOL is enabled. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4013
4014 static int
4015 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4016 {
4017         u32 val;
4018         int j;
4019
4020         /* Request access to the flash interface. */
4021         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4022         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4023                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4024                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4025                         break;
4026
4027                 udelay(5);
4028         }
4029
4030         if (j >= NVRAM_TIMEOUT_COUNT)
4031                 return -EBUSY;
4032
4033         return 0;
4034 }
4035
4036 static int
4037 bnx2_release_nvram_lock(struct bnx2 *bp)
4038 {
4039         int j;
4040         u32 val;
4041
4042         /* Relinquish nvram interface. */
4043         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4044
4045         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4046                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4047                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4048                         break;
4049
4050                 udelay(5);
4051         }
4052
4053         if (j >= NVRAM_TIMEOUT_COUNT)
4054                 return -EBUSY;
4055
4056         return 0;
4057 }
4058
4059
4060 static int
4061 bnx2_enable_nvram_write(struct bnx2 *bp)
4062 {
4063         u32 val;
4064
4065         val = REG_RD(bp, BNX2_MISC_CFG);
4066         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4067
4068         if (bp->flash_info->flags & BNX2_NV_WREN) {
4069                 int j;
4070
4071                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4072                 REG_WR(bp, BNX2_NVM_COMMAND,
4073                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4074
4075                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4076                         udelay(5);
4077
4078                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4079                         if (val & BNX2_NVM_COMMAND_DONE)
4080                                 break;
4081                 }
4082
4083                 if (j >= NVRAM_TIMEOUT_COUNT)
4084                         return -EBUSY;
4085         }
4086         return 0;
4087 }
4088
4089 static void
4090 bnx2_disable_nvram_write(struct bnx2 *bp)
4091 {
4092         u32 val;
4093
4094         val = REG_RD(bp, BNX2_MISC_CFG);
4095         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4096 }
4097
4098
4099 static void
4100 bnx2_enable_nvram_access(struct bnx2 *bp)
4101 {
4102         u32 val;
4103
4104         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4105         /* Enable both bits, even on read. */
4106         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4107                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4108 }
4109
4110 static void
4111 bnx2_disable_nvram_access(struct bnx2 *bp)
4112 {
4113         u32 val;
4114
4115         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4116         /* Disable both bits, even after read. */
4117         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4118                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4119                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4120 }
4121
/* Erase one page of (unbuffered) serial flash at @offset.  Buffered
 * flash parts need no erase cycle and return immediately.  Returns 0
 * on success or -EBUSY if the erase command times out.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4161
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes).
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST to frame multi-word
 * bursts.  Caller must already hold the NVRAM lock and have access
 * enabled.  Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Map the linear offset to page number + offset-in-page. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Store big-endian so the byte order in ret_val
			 * matches the flash contents. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4205
4206
4207 static int
4208 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4209 {
4210         u32 cmd;
4211         __be32 val32;
4212         int j;
4213
4214         /* Build the command word. */
4215         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4216
4217         /* Calculate an offset of a buffered flash, not needed for 5709. */
4218         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4219                 offset = ((offset / bp->flash_info->page_size) <<
4220                           bp->flash_info->page_bits) +
4221                          (offset % bp->flash_info->page_size);
4222         }
4223
4224         /* Need to clear DONE bit separately. */
4225         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4226
4227         memcpy(&val32, val, 4);
4228
4229         /* Write the data. */
4230         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4231
4232         /* Address of the NVRAM to write to. */
4233         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4234
4235         /* Issue the write command. */
4236         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4237
4238         /* Wait for completion. */
4239         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4240                 udelay(5);
4241
4242                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4243                         break;
4244         }
4245         if (j >= NVRAM_TIMEOUT_COUNT)
4246                 return -EBUSY;
4247
4248         return 0;
4249 }
4250
/* Identify the attached flash/EEPROM part and record it in bp->flash_info,
 * then determine the flash size.  On 5706/5708 the part is matched against
 * flash_table[] using the strapping bits in NVM_CFG1; if the interface has
 * not yet been reconfigured, the matching entry's config is programmed into
 * the NVM_CFG*/NVM_WRITE1 registers.  Returns 0, a lock-acquire error, or
 * -ENODEV when no table entry matches.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* 5709 has a single known flash layout; no strap detection needed. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops fall through with j == entry_count when nothing
	 * matched the straps. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised by shared HW config; fall back to the
	 * table entry's total size when the field is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4333
4334 static int
4335 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4336                 int buf_size)
4337 {
4338         int rc = 0;
4339         u32 cmd_flags, offset32, len32, extra;
4340
4341         if (buf_size == 0)
4342                 return 0;
4343
4344         /* Request access to the flash interface. */
4345         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4346                 return rc;
4347
4348         /* Enable access to flash interface */
4349         bnx2_enable_nvram_access(bp);
4350
4351         len32 = buf_size;
4352         offset32 = offset;
4353         extra = 0;
4354
4355         cmd_flags = 0;
4356
4357         if (offset32 & 3) {
4358                 u8 buf[4];
4359                 u32 pre_len;
4360
4361                 offset32 &= ~3;
4362                 pre_len = 4 - (offset & 3);
4363
4364                 if (pre_len >= len32) {
4365                         pre_len = len32;
4366                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4367                                     BNX2_NVM_COMMAND_LAST;
4368                 }
4369                 else {
4370                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4371                 }
4372
4373                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4374
4375                 if (rc)
4376                         return rc;
4377
4378                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4379
4380                 offset32 += 4;
4381                 ret_buf += pre_len;
4382                 len32 -= pre_len;
4383         }
4384         if (len32 & 3) {
4385                 extra = 4 - (len32 & 3);
4386                 len32 = (len32 + 4) & ~3;
4387         }
4388
4389         if (len32 == 4) {
4390                 u8 buf[4];
4391
4392                 if (cmd_flags)
4393                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4394                 else
4395                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4396                                     BNX2_NVM_COMMAND_LAST;
4397
4398                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4399
4400                 memcpy(ret_buf, buf, 4 - extra);
4401         }
4402         else if (len32 > 0) {
4403                 u8 buf[4];
4404
4405                 /* Read the first word. */
4406                 if (cmd_flags)
4407                         cmd_flags = 0;
4408                 else
4409                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4410
4411                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4412
4413                 /* Advance to the next dword. */
4414                 offset32 += 4;
4415                 ret_buf += 4;
4416                 len32 -= 4;
4417
4418                 while (len32 > 4 && rc == 0) {
4419                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4420
4421                         /* Advance to the next dword. */
4422                         offset32 += 4;
4423                         ret_buf += 4;
4424                         len32 -= 4;
4425                 }
4426
4427                 if (rc)
4428                         return rc;
4429
4430                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4431                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4432
4433                 memcpy(ret_buf, buf, 4 - extra);
4434         }
4435
4436         /* Disable access to flash interface */
4437         bnx2_disable_nvram_access(bp);
4438
4439         bnx2_release_nvram_lock(bp);
4440
4441         return rc;
4442 }
4443
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.  Unaligned
 * start/end bytes are handled by reading back the surrounding dwords into
 * a temporary aligned buffer.  For non-buffered flash, each affected page
 * is read in full, erased, and rewritten (read-modify-write); buffered
 * flash is written directly.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths that jump to nvram_write_end from inside the
 * per-page loop do so while the NVRAM lock is still held and access is
 * still enabled - this looks like a lock leak on failure; confirm intent.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: widen the range and fetch the dword that
		 * holds the leading bytes we must preserve. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: widen and fetch the trailing dword. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build a fully-aligned image: preserved head + caller data
		 * + preserved tail. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer to hold one full page for the
		 * read-modify-write cycle (264 covers the largest page). */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			/* Restore the preserved bytes before data_start. */
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST flag on the final dword of the page, or of the
			 * data range when the flash is buffered. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4623
4624 static void
4625 bnx2_init_fw_cap(struct bnx2 *bp)
4626 {
4627         u32 val, sig = 0;
4628
4629         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4630         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4631
4632         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4633                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4634
4635         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4636         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4637                 return;
4638
4639         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4640                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4641                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4642         }
4643
4644         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4645             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4646                 u32 link;
4647
4648                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4649
4650                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4651                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4652                         bp->phy_port = PORT_FIBRE;
4653                 else
4654                         bp->phy_port = PORT_TP;
4655
4656                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4657                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4658         }
4659
4660         if (netif_running(bp->dev) && sig)
4661                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4662 }
4663
/* Program the GRC window registers so the MSI-X table and PBA regions are
 * reachable through separate PCI windows.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch GRC addressing to separate-window mode first. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4672
/* Perform a coordinated soft reset of the chip.  Quiesces DMA, synchronizes
 * with the bootcode firmware (using @reset_code to describe the reason),
 * issues the chip-specific reset, waits for firmware re-initialization, and
 * reapplies post-reset fixups (5706 A0 voltage/rbuf workarounds, MSI-X
 * table setup).  Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through MISC_COMMAND; the config-space window
		 * settings must be restored afterwards. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* 5706/5708 reset via the core-reset request bit. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; switch the remote link config if
	 * the PHY port changed across the reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4782
4783 static int
4784 bnx2_init_chip(struct bnx2 *bp)
4785 {
4786         u32 val, mtu;
4787         int rc, i;
4788
4789         /* Make sure the interrupt is not active. */
4790         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4791
4792         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4793               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4794 #ifdef __BIG_ENDIAN
4795               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4796 #endif
4797               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4798               DMA_READ_CHANS << 12 |
4799               DMA_WRITE_CHANS << 16;
4800
4801         val |= (0x2 << 20) | (1 << 11);
4802
4803         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4804                 val |= (1 << 23);
4805
4806         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4807             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4808                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4809
4810         REG_WR(bp, BNX2_DMA_CONFIG, val);
4811
4812         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4813                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4814                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4815                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4816         }
4817
4818         if (bp->flags & BNX2_FLAG_PCIX) {
4819                 u16 val16;
4820
4821                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4822                                      &val16);
4823                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4824                                       val16 & ~PCI_X_CMD_ERO);
4825         }
4826
4827         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4828                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4829                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4830                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4831
4832         /* Initialize context mapping and zero out the quick contexts.  The
4833          * context block must have already been enabled. */
4834         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4835                 rc = bnx2_init_5709_context(bp);
4836                 if (rc)
4837                         return rc;
4838         } else
4839                 bnx2_init_context(bp);
4840
4841         if ((rc = bnx2_init_cpus(bp)) != 0)
4842                 return rc;
4843
4844         bnx2_init_nvram(bp);
4845
4846         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4847
4848         val = REG_RD(bp, BNX2_MQ_CONFIG);
4849         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4850         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4851         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4852                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4853                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4854                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4855         }
4856
4857         REG_WR(bp, BNX2_MQ_CONFIG, val);
4858
4859         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4860         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4861         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4862
4863         val = (BCM_PAGE_BITS - 8) << 24;
4864         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4865
4866         /* Configure page size. */
4867         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4868         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4869         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4870         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4871
4872         val = bp->mac_addr[0] +
4873               (bp->mac_addr[1] << 8) +
4874               (bp->mac_addr[2] << 16) +
4875               bp->mac_addr[3] +
4876               (bp->mac_addr[4] << 8) +
4877               (bp->mac_addr[5] << 16);
4878         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4879
4880         /* Program the MTU.  Also include 4 bytes for CRC32. */
4881         mtu = bp->dev->mtu;
4882         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4883         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4884                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4885         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4886
4887         if (mtu < 1500)
4888                 mtu = 1500;
4889
4890         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4891         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4892         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4893
4894         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4895         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4896                 bp->bnx2_napi[i].last_status_idx = 0;
4897
4898         bp->idle_chk_status_idx = 0xffff;
4899
4900         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4901
4902         /* Set up how to generate a link change interrupt. */
4903         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4904
4905         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4906                (u64) bp->status_blk_mapping & 0xffffffff);
4907         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4908
4909         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4910                (u64) bp->stats_blk_mapping & 0xffffffff);
4911         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4912                (u64) bp->stats_blk_mapping >> 32);
4913
4914         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4915                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4916
4917         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4918                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4919
4920         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4921                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4922
4923         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4924
4925         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4926
4927         REG_WR(bp, BNX2_HC_COM_TICKS,
4928                (bp->com_ticks_int << 16) | bp->com_ticks);
4929
4930         REG_WR(bp, BNX2_HC_CMD_TICKS,
4931                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4932
4933         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4934                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4935         else
4936                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4937         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4938
4939         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4940                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4941         else {
4942                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4943                       BNX2_HC_CONFIG_COLLECT_STATS;
4944         }
4945
4946         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4947                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4948                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4949
4950                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4951         }
4952
4953         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4954                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4955
4956         REG_WR(bp, BNX2_HC_CONFIG, val);
4957
4958         for (i = 1; i < bp->irq_nvecs; i++) {
4959                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4960                            BNX2_HC_SB_CONFIG_1;
4961
4962                 REG_WR(bp, base,
4963                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4964                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4965                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4966
4967                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4968                         (bp->tx_quick_cons_trip_int << 16) |
4969                          bp->tx_quick_cons_trip);
4970
4971                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4972                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4973
4974                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4975                        (bp->rx_quick_cons_trip_int << 16) |
4976                         bp->rx_quick_cons_trip);
4977
4978                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4979                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
4980         }
4981
4982         /* Clear internal stats counters. */
4983         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4984
4985         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4986
4987         /* Initialize the receive filter. */
4988         bnx2_set_rx_mode(bp->dev);
4989
4990         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4991                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4992                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4993                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4994         }
4995         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4996                           1, 0);
4997
4998         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4999         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5000
5001         udelay(20);
5002
5003         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5004
5005         return rc;
5006 }
5007
5008 static void
5009 bnx2_clear_ring_states(struct bnx2 *bp)
5010 {
5011         struct bnx2_napi *bnapi;
5012         struct bnx2_tx_ring_info *txr;
5013         struct bnx2_rx_ring_info *rxr;
5014         int i;
5015
5016         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5017                 bnapi = &bp->bnx2_napi[i];
5018                 txr = &bnapi->tx_ring;
5019                 rxr = &bnapi->rx_ring;
5020
5021                 txr->tx_cons = 0;
5022                 txr->hw_tx_cons = 0;
5023                 rxr->rx_prod_bseq = 0;
5024                 rxr->rx_prod = 0;
5025                 rxr->rx_cons = 0;
5026                 rxr->rx_pg_prod = 0;
5027                 rxr->rx_pg_cons = 0;
5028         }
5029 }
5030
/* Program the L2 TX context for one tx ring: context type, command type,
 * and the 64-bit host address of the tx descriptor ring.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	/* The 5709 uses a different L2 context layout (the _XI offsets)
	 * than the 5706/5708.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* bits 16+ carry the BD byte count (8 * 2 = 16-byte BDs). */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* Split the DMA address of the tx BD ring into high/low words. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5060
5061 static void
5062 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5063 {
5064         struct tx_bd *txbd;
5065         u32 cid = TX_CID;
5066         struct bnx2_napi *bnapi;
5067         struct bnx2_tx_ring_info *txr;
5068
5069         bnapi = &bp->bnx2_napi[ring_num];
5070         txr = &bnapi->tx_ring;
5071
5072         if (ring_num == 0)
5073                 cid = TX_CID;
5074         else
5075                 cid = TX_TSS_CID + ring_num - 1;
5076
5077         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5078
5079         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5080
5081         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5082         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5083
5084         txr->tx_prod = 0;
5085         txr->tx_prod_bseq = 0;
5086
5087         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5088         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5089
5090         bnx2_init_tx_context(bp, cid, txr);
5091 }
5092
5093 static void
5094 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5095                      int num_rings)
5096 {
5097         int i;
5098         struct rx_bd *rxbd;
5099
5100         for (i = 0; i < num_rings; i++) {
5101                 int j;
5102
5103                 rxbd = &rx_ring[i][0];
5104                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5105                         rxbd->rx_bd_len = buf_size;
5106                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5107                 }
5108                 if (i == (num_rings - 1))
5109                         j = 0;
5110                 else
5111                         j = i + 1;
5112                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5113                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5114         }
5115 }
5116
/* Initialize rx ring @ring_num: build the BD chains, program the rx L2
 * context (including the optional jumbo page ring), pre-fill the rings
 * with pages/skbs, and publish the initial producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX context ID; additional RSS rings use
	 * consecutive IDs starting at RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* Arm the MQ mapping for this context on 5709. */
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring; overwritten below if jumbo paging is on. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Host address of the first page-ring BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Host address of the first normal rx BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated (warn). */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal rx ring with skbs, same best-effort policy. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used by the hot path to advance producers. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip where the producers now stand. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5202
/* Initialize every tx and rx ring and, when multiple rings are in use,
 * program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are being set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Likewise disable RSS while the rx rings are being set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table 4 bytes at a time, spreading
		 * entries round-robin over the non-default rx rings, and
		 * flush each completed 32-bit word to RXP scratch memory.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5247
5248 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5249 {
5250         u32 max, num_rings = 1;
5251
5252         while (ring_size > MAX_RX_DESC_CNT) {
5253                 ring_size -= MAX_RX_DESC_CNT;
5254                 num_rings++;
5255         }
5256         /* round to next power of 2 */
5257         max = max_size;
5258         while ((max & num_rings) == 0)
5259                 max >>= 1;
5260
5261         if (num_rings != max)
5262                 max <<= 1;
5263
5264         return max;
5265 }
5266
5267 static void
5268 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5269 {
5270         u32 rx_size, rx_space, jumbo_size;
5271
5272         /* 8 for CRC and VLAN */
5273         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5274
5275         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5276                 sizeof(struct skb_shared_info);
5277
5278         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5279         bp->rx_pg_ring_size = 0;
5280         bp->rx_max_pg_ring = 0;
5281         bp->rx_max_pg_ring_idx = 0;
5282         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5283                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5284
5285                 jumbo_size = size * pages;
5286                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5287                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5288
5289                 bp->rx_pg_ring_size = jumbo_size;
5290                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5291                                                         MAX_RX_PG_RINGS);
5292                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5293                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5294                 bp->rx_copy_thresh = 0;
5295         }
5296
5297         bp->rx_buf_use_size = rx_size;
5298         /* hw alignment */
5299         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5300         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5301         bp->rx_ring_size = size;
5302         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5303         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5304 }
5305
/* Unmap and free every skb still posted on any tx ring.  Used when the
 * chip is being reset and pending transmissions will never complete.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring never allocated (e.g. device not fully opened). */
		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the loop body: one slot for the head
		 * BD plus one per fragment.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Head BD maps the linear part of the skb. */
			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Then one page mapping per paged fragment. */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5349
/* Unmap and free every skb and page still posted on any rx ring. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): bails out of the whole function on the
		 * first unallocated ring rather than continuing to the
		 * next one; presumably all rx rings are allocated and
		 * freed together so later rings cannot be populated —
		 * confirm against the alloc path.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release the jumbo page-ring pages, if any were used. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5383
/* Free all skbs posted on every tx and rx ring. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5390
5391 static int
5392 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5393 {
5394         int rc;
5395
5396         rc = bnx2_reset_chip(bp, reset_code);
5397         bnx2_free_skbs(bp);
5398         if (rc)
5399                 return rc;
5400
5401         if ((rc = bnx2_init_chip(bp)) != 0)
5402                 return rc;
5403
5404         bnx2_init_all_rings(bp);
5405         return 0;
5406 }
5407
5408 static int
5409 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5410 {
5411         int rc;
5412
5413         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5414                 return rc;
5415
5416         spin_lock_bh(&bp->phy_lock);
5417         bnx2_init_phy(bp, reset_phy);
5418         bnx2_set_link(bp);
5419         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5420                 bnx2_remote_phy_event(bp);
5421         spin_unlock_bh(&bp->phy_lock);
5422         return 0;
5423 }
5424
5425 static int
5426 bnx2_shutdown_chip(struct bnx2 *bp)
5427 {
5428         u32 reset_code;
5429
5430         if (bp->flags & BNX2_FLAG_NO_WOL)
5431                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5432         else if (bp->wol)
5433                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5434         else
5435                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5436
5437         return bnx2_reset_chip(bp, reset_code);
5438 }
5439
/* Ethtool self-test: for each register in the table, verify that the
 * read/write bits toggle and the read-only bits hold their value.
 * Returns 0 on success or -ENODEV on the first mismatch; the original
 * register value is always restored.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table of { offset, flags, writable-bit mask, read-only-bit mask };
	 * terminated by offset 0xffff.  Entries flagged BNX2_FL_NOT_5709
	 * are skipped on the 5709.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read back 0, ro bits must
		 * be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits must
		 * still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5610
5611 static int
5612 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5613 {
5614         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5615                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5616         int i;
5617
5618         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5619                 u32 offset;
5620
5621                 for (offset = 0; offset < size; offset += 4) {
5622
5623                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5624
5625                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5626                                 test_pattern[i]) {
5627                                 return -ENODEV;
5628                         }
5629                 }
5630         }
5631         return 0;
5632 }
5633
5634 static int
5635 bnx2_test_memory(struct bnx2 *bp)
5636 {
5637         int ret = 0;
5638         int i;
5639         static struct mem_entry {
5640                 u32   offset;
5641                 u32   len;
5642         } mem_tbl_5706[] = {
5643                 { 0x60000,  0x4000 },
5644                 { 0xa0000,  0x3000 },
5645                 { 0xe0000,  0x4000 },
5646                 { 0x120000, 0x4000 },
5647                 { 0x1a0000, 0x4000 },
5648                 { 0x160000, 0x4000 },
5649                 { 0xffffffff, 0    },
5650         },
5651         mem_tbl_5709[] = {
5652                 { 0x60000,  0x4000 },
5653                 { 0xa0000,  0x3000 },
5654                 { 0xe0000,  0x4000 },
5655                 { 0x120000, 0x4000 },
5656                 { 0x1a0000, 0x4000 },
5657                 { 0xffffffff, 0    },
5658         };
5659         struct mem_entry *mem_tbl;
5660
5661         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5662                 mem_tbl = mem_tbl_5709;
5663         else
5664                 mem_tbl = mem_tbl_5706;
5665
5666         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5667                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5668                         mem_tbl[i].len)) != 0) {
5669                         return ret;
5670                 }
5671         }
5672
5673         return ret;
5674 }
5675
/* Loopback modes for bnx2_run_loopback(). */
#define BNX2_MAC_LOOPBACK       0
#define BNX2_PHY_LOOPBACK       1

/* Ethtool self-test: transmit one self-addressed test packet with the
 * MAC or PHY in loopback and verify it is received intact (length and
 * payload checked, frame error flags clear).
 * Returns 0 on success, -ENODEV on any mismatch, -EINVAL for an unknown
 * mode, -ENOMEM/-EIO on allocation or DMA-mapping failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback cannot be tested with a remote PHY. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: own MAC as destination, zeroed type/len
	 * area, then an incrementing byte pattern as payload.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalescing pass (without interrupt) to get the current
	 * rx consumer index before sending.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx BD describing the whole packet. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second forced coalescing pass to pick up the completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The packet must have completed transmission... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any frame error flag means the loopback failed. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length (minus 4-byte CRC) must match what was sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5810
/* Bit flags returned by bnx2_test_loopback(); a set bit identifies the
 * loopback test (MAC and/or PHY) that failed.
 */
#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
                                         BNX2_PHY_LOOPBACK_FAILED)
5815
5816 static int
5817 bnx2_test_loopback(struct bnx2 *bp)
5818 {
5819         int rc = 0;
5820
5821         if (!netif_running(bp->dev))
5822                 return BNX2_LOOPBACK_FAILED;
5823
5824         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5825         spin_lock_bh(&bp->phy_lock);
5826         bnx2_init_phy(bp, 1);
5827         spin_unlock_bh(&bp->phy_lock);
5828         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5829                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5830         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5831                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5832         return rc;
5833 }
5834
/* bnx2_test_nvram() reads NVRAM_SIZE bytes and CRC-checks them as two
 * 0x100-byte blocks.  CRC32_RESIDUAL is the constant CRC-32 residue of
 * a block that carries its own little-endian CRC in its last 4 bytes.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
5837
5838 static int
5839 bnx2_test_nvram(struct bnx2 *bp)
5840 {
5841         __be32 buf[NVRAM_SIZE / 4];
5842         u8 *data = (u8 *) buf;
5843         int rc = 0;
5844         u32 magic, csum;
5845
5846         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5847                 goto test_nvram_done;
5848
5849         magic = be32_to_cpu(buf[0]);
5850         if (magic != 0x669955aa) {
5851                 rc = -ENODEV;
5852                 goto test_nvram_done;
5853         }
5854
5855         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5856                 goto test_nvram_done;
5857
5858         csum = ether_crc_le(0x100, data);
5859         if (csum != CRC32_RESIDUAL) {
5860                 rc = -ENODEV;
5861                 goto test_nvram_done;
5862         }
5863
5864         csum = ether_crc_le(0x100, data + 0x100);
5865         if (csum != CRC32_RESIDUAL) {
5866                 rc = -ENODEV;
5867         }
5868
5869 test_nvram_done:
5870         return rc;
5871 }
5872
5873 static int
5874 bnx2_test_link(struct bnx2 *bp)
5875 {
5876         u32 bmsr;
5877
5878         if (!netif_running(bp->dev))
5879                 return -ENODEV;
5880
5881         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5882                 if (bp->link_up)
5883                         return 0;
5884                 return -ENODEV;
5885         }
5886         spin_lock_bh(&bp->phy_lock);
5887         bnx2_enable_bmsr1(bp);
5888         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5889         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5890         bnx2_disable_bmsr1(bp);
5891         spin_unlock_bh(&bp->phy_lock);
5892
5893         if (bmsr & BMSR_LSTATUS) {
5894                 return 0;
5895         }
5896         return -ENODEV;
5897 }
5898
5899 static int
5900 bnx2_test_intr(struct bnx2 *bp)
5901 {
5902         int i;
5903         u16 status_idx;
5904
5905         if (!netif_running(bp->dev))
5906                 return -ENODEV;
5907
5908         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5909
5910         /* This register is not touched during run-time. */
5911         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5912         REG_RD(bp, BNX2_HC_COMMAND);
5913
5914         for (i = 0; i < 10; i++) {
5915                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5916                         status_idx) {
5917
5918                         break;
5919                 }
5920
5921                 msleep_interruptible(10);
5922         }
5923         if (i < 10)
5924                 return 0;
5925
5926         return -ENODEV;
5927 }
5928
/* Determining link for parallel detection.
 *
 * Probe the 5706 SerDes PHY shadow/expansion registers to decide
 * whether a non-autonegotiating link partner appears to be present.
 * Returns 1 when a link looks viable, 0 otherwise (or when parallel
 * detection is disabled via BNX2_PHY_FLAG_NO_PARALLEL).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it back. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detect -> nothing on the wire. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* AN_DBG is read back-to-back; presumably the first read clears
	 * latched status so the second reflects the current state (same
	 * pattern as the double BMSR read) — TODO confirm against the
	 * 5706 datasheet.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Expansion register 1, also read twice. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5960
/* Periodic timer work for the 5706 SerDes PHY.
 *
 * Implements parallel detection: when autoneg is enabled but no link
 * comes up and the partner looks alive, force 1000/full; when a
 * parallel-detected link later shows the partner autonegotiating,
 * switch back to autoneg.  Finally re-checks link state against the
 * AN_DBG sync status.  All PHY access is done under bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Still waiting out a previously started autoneg attempt;
		 * skip the link check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg on but no link: if the partner appears
			 * present without autoneg, force 1000/full and mark
			 * the link as parallel-detected.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): indirect read via regs 0x17/0x15; bit 0x20
		 * presumably means the partner is now autonegotiating, in
		 * which case autoneg is re-enabled — confirm against the
		 * 5706 register documentation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG, as in bnx2_5706_serdes_has_link(). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* We think the link is up but the PHY reports no
			 * sync: force the link down once, then on later
			 * ticks let bnx2_set_link() re-evaluate.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6022
6023 static void
6024 bnx2_5708_serdes_timer(struct bnx2 *bp)
6025 {
6026         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6027                 return;
6028
6029         if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6030                 bp->serdes_an_pending = 0;
6031                 return;
6032         }
6033
6034         spin_lock(&bp->phy_lock);
6035         if (bp->serdes_an_pending)
6036                 bp->serdes_an_pending--;
6037         else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6038                 u32 bmcr;
6039
6040                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6041                 if (bmcr & BMCR_ANENABLE) {
6042                         bnx2_enable_forced_2g5(bp);
6043                         bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6044                 } else {
6045                         bnx2_disable_forced_2g5(bp);
6046                         bp->serdes_an_pending = 2;
6047                         bp->current_interval = BNX2_TIMER_INTERVAL;
6048                 }