/* Extracted from linux-2.6.git: drivers/net/bnx2.c */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.0.8"
62 #define DRV_MODULE_RELDATE      "Feb 15, 2010"
63 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j9.fw"
66 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
74 static char version[] __devinitdata =
75         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87 static int disable_msi = 0;
88
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board/chip identifiers.  Each value doubles as the index into
 * board_info[] below and as the driver_data field of the PCI device
 * table entries, so the enum order must stay in sync with board_info[].
 */
92 typedef enum {
93         BCM5706 = 0,
94         NC370T,
95         NC370I,
96         BCM5706S,
97         NC370F,
98         BCM5708,
99         BCM5708S,
100         BCM5709,
101         BCM5709S,
102         BCM5716,
103         BCM5716S,
104 } board_t;
105
106 /* indexed by board_t, above -- entry order must match the board_t enum */
107 static struct {
108         char *name;
109 } board_info[] __devinitdata = {
110         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111         { "HP NC370T Multifunction Gigabit Server Adapter" },
112         { "HP NC370i Multifunction Gigabit Server Adapter" },
113         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114         { "HP NC370F Multifunction Gigabit Server Adapter" },
115         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
118         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119         { "Broadcom NetXtreme II BCM5716 1000Base-T" },
120         { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121         };
122
/* PCI device ID table.  The HP NC370x entries carry specific subsystem
 * IDs and must precede the wildcard (PCI_ANY_ID) entries for the same
 * device ID, since PCI ID matching takes the first matching entry.
 * The last field is the board_t index used to look up the board name.
 */
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142         { PCI_VENDOR_ID_BROADCOM, 0x163b,
143           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144         { PCI_VENDOR_ID_BROADCOM, 0x163c,
145           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146         { 0, }
147 };
148
/* Table describing the NVRAM/flash parts the NetXtreme II may be strapped
 * with.  The five leading hex words are raw register values (strapping
 * match plus configuration/write-command words -- see struct flash_spec
 * in bnx2.h for the exact field order; TODO confirm against header),
 * followed by access flags, page geometry, byte-address mask, total size
 * and a human-readable name.  Entries with total size 0 are placeholder
 * "expansion" slots for strapping codes with no known part.
 */
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
153         /* Slow EEPROM */
154         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157          "EEPROM - slow"},
158         /* Expansion entry 0001 */
159         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162          "Entry 0001"},
163         /* Saifun SA25F010 (non-buffered flash) */
164         /* strap, cfg1, & write1 need updates */
165         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168          "Non-buffered flash (128kB)"},
169         /* Saifun SA25F020 (non-buffered flash) */
170         /* strap, cfg1, & write1 need updates */
171         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174          "Non-buffered flash (256kB)"},
175         /* Expansion entry 0100 */
176         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179          "Entry 0100"},
180         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190         /* Saifun SA25F005 (non-buffered flash) */
191         /* strap, cfg1, & write1 need updates */
192         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195          "Non-buffered flash (64kB)"},
196         /* Fast EEPROM */
197         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200          "EEPROM - fast"},
201         /* Expansion entry 1001 */
202         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205          "Entry 1001"},
206         /* Expansion entry 1010 */
207         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210          "Entry 1010"},
211         /* ATMEL AT45DB011B (buffered flash) */
212         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215          "Buffered flash (128kB)"},
216         /* Expansion entry 1100 */
217         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220          "Entry 1100"},
221         /* Expansion entry 1101 */
222         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225          "Entry 1101"},
226         /* Ateml Expansion entry 1110 */
227         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230          "Entry 1110 (Atmel)"},
231         /* ATMEL AT45DB021B (buffered flash) */
232         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235          "Buffered flash (256kB)"},
236 };
237
/* The 5709 family has a single, fixed flash layout, so it gets a
 * dedicated spec instead of a strapping-based lookup in flash_table[].
 */
238 static const struct flash_spec flash_5709 = {
239         .flags          = BNX2_NV_BUFFERED,
240         .page_bits      = BCM5709_FLASH_PAGE_BITS,
241         .page_size      = BCM5709_FLASH_PAGE_SIZE,
242         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
243         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
244         .name           = "5709 Buffered flash (256kB)",
245 };
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
/* Return the number of free TX descriptors in @txr.
 *
 * tx_prod and tx_cons are free-running 16-bit-style indices; their
 * difference can exceed TX_DESC_CNT transiently, hence the masking
 * below.  The smp_mb() pairs with the barrier on the producer side so
 * that tx_cons is read after the caller's earlier ring updates are
 * visible (NOTE(review): pairing site not visible in this chunk --
 * confirm against bnx2_tx_int()).
 */
249 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
250 {
251         u32 diff;
252 
253         smp_mb();
254 
255         /* The ring uses 256 indices for 255 entries, one of them
256          * needs to be skipped.
257          */
258         diff = txr->tx_prod - txr->tx_cons;
259         if (unlikely(diff >= TX_DESC_CNT)) {
260                 diff &= 0xffff;
261                 if (diff == TX_DESC_CNT)
262                         diff = MAX_TX_DESC_CNT;
263         }
264         return (bp->tx_ring_size - diff);
265 }
266
/* Indirectly read a device register at @offset through the PCICFG
 * register window.  indirect_lock serializes the two-step
 * window-address + window-data access against other indirect users.
 */
267 static u32
268 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
269 {
270         u32 val;
271 
272         spin_lock_bh(&bp->indirect_lock);
273         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
274         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
275         spin_unlock_bh(&bp->indirect_lock);
276         return val;
277 }
278
/* Indirectly write @val to the device register at @offset through the
 * PCICFG register window; counterpart of bnx2_reg_rd_ind().
 */
279 static void
280 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
281 {
282         spin_lock_bh(&bp->indirect_lock);
283         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
284         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
285         spin_unlock_bh(&bp->indirect_lock);
286 }
287
288 static void
289 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
290 {
291         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
292 }
293
294 static u32
295 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
296 {
297         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
298 }
299
/* Write @val into context memory at @cid_addr + @offset.
 *
 * On 5709 the write goes through CTX_CTX_DATA/CTX_CTX_CTRL and the
 * hardware clears the WRITE_REQ bit when done, so we poll it up to
 * 5 times, 5us apart.  A timeout is silently ignored here.  Older
 * chips take the address/data pair directly.  indirect_lock keeps the
 * two-register sequence atomic versus other indirect accessors.
 */
300 static void
301 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
302 {
303         offset += cid_addr;
304         spin_lock_bh(&bp->indirect_lock);
305         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
306                 int i;
307 
308                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
309                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
310                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
311                 for (i = 0; i < 5; i++) {
312                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
313                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
314                                 break;
315                         udelay(5);
316                 }
317         } else {
318                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
319                 REG_WR(bp, BNX2_CTX_DATA, val);
320         }
321         spin_unlock_bh(&bp->indirect_lock);
322 }
323
324 #ifdef BCM_CNIC
325 static int
326 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
327 {
328         struct bnx2 *bp = netdev_priv(dev);
329         struct drv_ctl_io *io = &info->data.io;
330
331         switch (info->cmd) {
332         case DRV_CTL_IO_WR_CMD:
333                 bnx2_reg_wr_ind(bp, io->offset, io->data);
334                 break;
335         case DRV_CTL_IO_RD_CMD:
336                 io->data = bnx2_reg_rd_ind(bp, io->offset);
337                 break;
338         case DRV_CTL_CTX_WR_CMD:
339                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
340                 break;
341         default:
342                 return -EINVAL;
343         }
344         return 0;
345 }
346
/* Fill in the IRQ/status-block information the CNIC driver will use.
 * With MSI-X, CNIC gets its own vector (index bp->irq_nvecs) and its
 * own aligned slice of the status block; otherwise it shares vector 0
 * and polls via cnic_tag/cnic_present on the first napi instance.
 */
347 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
348 {
349         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
350         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
351         int sb_id;
352 
353         if (bp->flags & BNX2_FLAG_USING_MSIX) {
354                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
355                 bnapi->cnic_present = 0;
356                 sb_id = bp->irq_nvecs;
357                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
358         } else {
359                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
360                 bnapi->cnic_tag = bnapi->last_status_idx;
361                 bnapi->cnic_present = 1;
362                 sb_id = 0;
363                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
364         }
365 
366         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
367         cp->irq_arr[0].status_blk = (void *)
368                 ((unsigned long) bnapi->status_blk.msi +
369                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
370         cp->irq_arr[0].status_blk_num = sb_id;
371         cp->num_irq = 1;
372 }
373
/* Register the CNIC driver's ops with this device.  Publishes @ops via
 * rcu_assign_pointer so readers under rcu_read_lock see fully
 * initialized data.  Returns -EINVAL for NULL ops, -EBUSY if already
 * registered.
 *
 * NOTE(review): drv_state is written here without bp->cnic_lock while
 * bnx2_unregister_cnic() takes that lock -- confirm the CNIC layer
 * serializes register/unregister externally.
 */
374 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
375                               void *data)
376 {
377         struct bnx2 *bp = netdev_priv(dev);
378         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
379 
380         if (ops == NULL)
381                 return -EINVAL;
382 
383         if (cp->drv_state & CNIC_DRV_STATE_REGD)
384                 return -EBUSY;
385 
386         bp->cnic_data = data;
387         rcu_assign_pointer(bp->cnic_ops, ops);
388 
389         cp->num_irq = 0;
390         cp->drv_state = CNIC_DRV_STATE_REGD;
391 
392         bnx2_setup_cnic_irq_info(bp);
393 
394         return 0;
395 }
396
/* Tear down the CNIC registration.  The ops pointer is cleared under
 * cnic_lock, then synchronize_rcu() guarantees no CPU is still inside
 * an RCU read-side section using the old ops before we return.
 */
397 static int bnx2_unregister_cnic(struct net_device *dev)
398 {
399         struct bnx2 *bp = netdev_priv(dev);
400         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
401         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
402 
403         mutex_lock(&bp->cnic_lock);
404         cp->drv_state = 0;
405         bnapi->cnic_present = 0;
406         rcu_assign_pointer(bp->cnic_ops, NULL);
407         mutex_unlock(&bp->cnic_lock);
408         synchronize_rcu();
409         return 0;
410 }
411
/* Exported probe hook for the CNIC driver: hand back this device's
 * cnic_eth_dev descriptor, populated with the chip id, PCI device,
 * register base and the register/unregister/control callbacks above.
 */
412 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
413 {
414         struct bnx2 *bp = netdev_priv(dev);
415         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
416 
417         cp->drv_owner = THIS_MODULE;
418         cp->chip_id = bp->chip_id;
419         cp->pdev = bp->pdev;
420         cp->io_base = bp->regview;
421         cp->drv_ctl = bnx2_drv_ctl;
422         cp->drv_register_cnic = bnx2_register_cnic;
423         cp->drv_unregister_cnic = bnx2_unregister_cnic;
424 
425         return cp;
426 }
427 EXPORT_SYMBOL(bnx2_cnic_probe);
428
/* Notify a registered CNIC driver (if any) that this port is stopping.
 * cnic_lock prevents the ops from being unregistered mid-call.
 */
429 static void
430 bnx2_cnic_stop(struct bnx2 *bp)
431 {
432         struct cnic_ops *c_ops;
433         struct cnic_ctl_info info;
434 
435         mutex_lock(&bp->cnic_lock);
436         c_ops = bp->cnic_ops;
437         if (c_ops) {
438                 info.cmd = CNIC_CTL_STOP_CMD;
439                 c_ops->cnic_ctl(bp->cnic_data, &info);
440         }
441         mutex_unlock(&bp->cnic_lock);
442 }
443
/* Notify a registered CNIC driver (if any) that this port is starting.
 * In non-MSI-X mode, resync cnic_tag with the current status index
 * first so the shared-vector polling starts from a consistent point.
 */
444 static void
445 bnx2_cnic_start(struct bnx2 *bp)
446 {
447         struct cnic_ops *c_ops;
448         struct cnic_ctl_info info;
449 
450         mutex_lock(&bp->cnic_lock);
451         c_ops = bp->cnic_ops;
452         if (c_ops) {
453                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
454                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
455 
456                         bnapi->cnic_tag = bnapi->last_status_idx;
457                 }
458                 info.cmd = CNIC_CTL_START_CMD;
459                 c_ops->cnic_ctl(bp->cnic_data, &info);
460         }
461         mutex_unlock(&bp->cnic_lock);
462 }
463
464 #else
465
/* No-op stub used when CNIC support (BCM_CNIC) is not compiled in. */
466 static void
467 bnx2_cnic_stop(struct bnx2 *bp)
468 {
469 }
470
/* No-op stub used when CNIC support (BCM_CNIC) is not compiled in. */
471 static void
472 bnx2_cnic_start(struct bnx2 *bp)
473 {
474 }
475
476 #endif
477
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the chip is auto-polling the PHY, auto-poll is suspended for the
 * duration of the manual access and restored afterwards (with a 40us
 * settle delay each way).  The START_BUSY bit is polled up to 50 times
 * at 10us intervals; on timeout *@val is zeroed and -EBUSY returned.
 * Caller is expected to serialize MDIO access (this function takes no
 * lock itself).
 */
478 static int
479 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
480 {
481         u32 val1;
482         int i, ret;
483 
484         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
485                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
486                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
487 
488                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
489                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
490 
491                 udelay(40);
492         }
493 
494         val1 = (bp->phy_addr << 21) | (reg << 16) |
495                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
496                 BNX2_EMAC_MDIO_COMM_START_BUSY;
497         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
498 
499         for (i = 0; i < 50; i++) {
500                 udelay(10);
501 
502                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
503                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
504                         udelay(5);
505 
506                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
507                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
508 
509                         break;
510                 }
511         }
512 
513         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
514                 *val = 0x0;
515                 ret = -EBUSY;
516         }
517         else {
518                 *val = val1;
519                 ret = 0;
520         }
521 
522         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
523                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
524                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
525 
526                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
527                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
528 
529                 udelay(40);
530         }
531 
532         return ret;
533 }
534
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-poll is suspended/restored around the
 * manual access, and START_BUSY is polled up to 50 times at 10us
 * intervals.  Returns 0 on completion, -EBUSY if the transaction never
 * finished.
 */
535 static int
536 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
537 {
538         u32 val1;
539         int i, ret;
540 
541         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
542                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
543                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
544 
545                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
546                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
547 
548                 udelay(40);
549         }
550 
551         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
552                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
553                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
554         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
555 
556         for (i = 0; i < 50; i++) {
557                 udelay(10);
558 
559                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
560                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
561                         udelay(5);
562                         break;
563                 }
564         }
565 
566         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
567                 ret = -EBUSY;
568         else
569                 ret = 0;
570 
571         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
572                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
573                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
574 
575                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
576                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
577 
578                 udelay(40);
579         }
580 
581         return ret;
582 }
583
/* Mask interrupts on every vector.  The trailing read flushes the
 * posted writes so masking has taken effect on return.
 */
584 static void
585 bnx2_disable_int(struct bnx2 *bp)
586 {
587         int i;
588         struct bnx2_napi *bnapi;
589 
590         for (i = 0; i < bp->irq_nvecs; i++) {
591                 bnapi = &bp->bnx2_napi[i];
592                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
593                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
594         }
595         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
596 }
597
/* Unmask interrupts on every vector.  Each vector is written twice:
 * first acking the last status index with MASK_INT still set, then
 * again without the mask to actually re-enable.  The final COAL_NOW
 * kicks the host coalescing block so any pending events generate an
 * interrupt immediately rather than waiting for the next timer tick.
 */
598 static void
599 bnx2_enable_int(struct bnx2 *bp)
600 {
601         int i;
602         struct bnx2_napi *bnapi;
603 
604         for (i = 0; i < bp->irq_nvecs; i++) {
605                 bnapi = &bp->bnx2_napi[i];
606 
607                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
608                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
609                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
610                        bnapi->last_status_idx);
611 
612                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
613                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
614                        bnapi->last_status_idx);
615         }
616         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
617 }
618
/* Disable interrupts and wait for all in-flight handlers to finish.
 * intr_sem is bumped first so the ISR treats further interrupts as
 * spurious; synchronize_irq() then drains any handler already running
 * on each vector.  Safe to call when the device is down (early return).
 */
619 static void
620 bnx2_disable_int_sync(struct bnx2 *bp)
621 {
622         int i;
623 
624         atomic_inc(&bp->intr_sem);
625         if (!netif_running(bp->dev))
626                 return;
627 
628         bnx2_disable_int(bp);
629         for (i = 0; i < bp->irq_nvecs; i++)
630                 synchronize_irq(bp->irq_tbl[i].vector);
631 }
632
633 static void
634 bnx2_napi_disable(struct bnx2 *bp)
635 {
636         int i;
637
638         for (i = 0; i < bp->irq_nvecs; i++)
639                 napi_disable(&bp->bnx2_napi[i].napi);
640 }
641
642 static void
643 bnx2_napi_enable(struct bnx2 *bp)
644 {
645         int i;
646
647         for (i = 0; i < bp->irq_nvecs; i++)
648                 napi_enable(&bp->bnx2_napi[i].napi);
649 }
650
/* Quiesce the interface: stop CNIC, NAPI and the TX queues, then
 * disable and drain interrupts.  trans_start is refreshed on every TX
 * queue so the netdev watchdog does not fire a TX timeout while the
 * device is deliberately stopped.
 */
651 static void
652 bnx2_netif_stop(struct bnx2 *bp)
653 {
654         bnx2_cnic_stop(bp);
655         if (netif_running(bp->dev)) {
656                 int i;
657 
658                 bnx2_napi_disable(bp);
659                 netif_tx_disable(bp->dev);
660                 /* prevent tx timeout */
661                 for (i = 0; i <  bp->dev->num_tx_queues; i++) {
662                         struct netdev_queue *txq;
663 
664                         txq = netdev_get_tx_queue(bp->dev, i);
665                         txq->trans_start = jiffies;
666                 }
667         }
668         bnx2_disable_int_sync(bp);
669 }
670
/* Undo bnx2_netif_stop().  Only resumes when intr_sem drops back to
 * zero, i.e. after the last outstanding stop has been matched, so
 * nested stop/start pairs balance correctly.
 */
671 static void
672 bnx2_netif_start(struct bnx2 *bp)
673 {
674         if (atomic_dec_and_test(&bp->intr_sem)) {
675                 if (netif_running(bp->dev)) {
676                         netif_tx_wake_all_queues(bp->dev);
677                         bnx2_napi_enable(bp);
678                         bnx2_enable_int(bp);
679                         bnx2_cnic_start(bp);
680                 }
681         }
682 }
683
/* Free per-ring TX resources: the DMA-coherent descriptor ring and the
 * kmalloc'ed software buffer-tracking ring.  Pointers are NULLed so a
 * repeat call (e.g. from the alloc_mem error path) is harmless.
 */
684 static void
685 bnx2_free_tx_mem(struct bnx2 *bp)
686 {
687         int i;
688 
689         for (i = 0; i < bp->num_tx_rings; i++) {
690                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
691                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
692 
693                 if (txr->tx_desc_ring) {
694                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
695                                             txr->tx_desc_ring,
696                                             txr->tx_desc_mapping);
697                         txr->tx_desc_ring = NULL;
698                 }
699                 kfree(txr->tx_buf_ring);
700                 txr->tx_buf_ring = NULL;
701         }
702 }
703
/* Free per-ring RX resources: the DMA-coherent descriptor pages for
 * both the normal and the page ("pg") rings, and the vmalloc'ed
 * software rings.  Pointers are NULLed so repeat calls are harmless.
 */
704 static void
705 bnx2_free_rx_mem(struct bnx2 *bp)
706 {
707         int i;
708 
709         for (i = 0; i < bp->num_rx_rings; i++) {
710                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
711                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
712                 int j;
713 
714                 for (j = 0; j < bp->rx_max_ring; j++) {
715                         if (rxr->rx_desc_ring[j])
716                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
717                                                     rxr->rx_desc_ring[j],
718                                                     rxr->rx_desc_mapping[j]);
719                         rxr->rx_desc_ring[j] = NULL;
720                 }
721                 vfree(rxr->rx_buf_ring);
722                 rxr->rx_buf_ring = NULL;
723 
724                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
725                         if (rxr->rx_pg_desc_ring[j])
726                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
727                                                     rxr->rx_pg_desc_ring[j],
728                                                     rxr->rx_pg_desc_mapping[j]);
729                         rxr->rx_pg_desc_ring[j] = NULL;
730                 }
731                 vfree(rxr->rx_pg_ring);
732                 rxr->rx_pg_ring = NULL;
733         }
734 }
735
/* Allocate per-ring TX resources (software ring + DMA descriptor ring).
 * On failure returns -ENOMEM leaving earlier allocations in place; the
 * caller (bnx2_alloc_mem) cleans up via bnx2_free_mem().
 */
736 static int
737 bnx2_alloc_tx_mem(struct bnx2 *bp)
738 {
739         int i;
740 
741         for (i = 0; i < bp->num_tx_rings; i++) {
742                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
743                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
744 
745                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
746                 if (txr->tx_buf_ring == NULL)
747                         return -ENOMEM;
748 
749                 txr->tx_desc_ring =
750                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
751                                              &txr->tx_desc_mapping);
752                 if (txr->tx_desc_ring == NULL)
753                         return -ENOMEM;
754         }
755         return 0;
756 }
757
/* Allocate per-ring RX resources: vmalloc'ed software rings (zeroed by
 * hand, since vmalloc does not clear) and DMA-coherent descriptor pages
 * for both the normal and the page ("pg") rings.  The pg ring is only
 * allocated when rx_pg_ring_size is nonzero.  Partial allocations on
 * failure are released by the caller through bnx2_free_mem().
 */
758 static int
759 bnx2_alloc_rx_mem(struct bnx2 *bp)
760 {
761         int i;
762 
763         for (i = 0; i < bp->num_rx_rings; i++) {
764                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
765                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
766                 int j;
767 
768                 rxr->rx_buf_ring =
769                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
770                 if (rxr->rx_buf_ring == NULL)
771                         return -ENOMEM;
772 
773                 memset(rxr->rx_buf_ring, 0,
774                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
775 
776                 for (j = 0; j < bp->rx_max_ring; j++) {
777                         rxr->rx_desc_ring[j] =
778                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
779                                                      &rxr->rx_desc_mapping[j]);
780                         if (rxr->rx_desc_ring[j] == NULL)
781                                 return -ENOMEM;
782 
783                 }
784 
785                 if (bp->rx_pg_ring_size) {
786                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
787                                                   bp->rx_max_pg_ring);
788                         if (rxr->rx_pg_ring == NULL)
789                                 return -ENOMEM;
790 
791                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
792                                bp->rx_max_pg_ring);
793                 }
794 
795                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
796                         rxr->rx_pg_desc_ring[j] =
797                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
798                                                 &rxr->rx_pg_desc_mapping[j]);
799                         if (rxr->rx_pg_desc_ring[j] == NULL)
800                                 return -ENOMEM;
801 
802                 }
803         }
804         return 0;
805 }
806
/* Release everything bnx2_alloc_mem() set up: TX/RX rings, 5709
 * context pages, and the combined status+statistics block.  Also used
 * as the alloc_mem error-path cleanup, so it tolerates partially
 * completed allocations (all frees are NULL-checked or NULL-safe).
 */
807 static void
808 bnx2_free_mem(struct bnx2 *bp)
809 {
810         int i;
811         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
812 
813         bnx2_free_tx_mem(bp);
814         bnx2_free_rx_mem(bp);
815 
816         for (i = 0; i < bp->ctx_pages; i++) {
817                 if (bp->ctx_blk[i]) {
818                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
819                                             bp->ctx_blk[i],
820                                             bp->ctx_blk_mapping[i]);
821                         bp->ctx_blk[i] = NULL;
822                 }
823         }
824         if (bnapi->status_blk.msi) {
825                 pci_free_consistent(bp->pdev, bp->status_stats_size,
826                                     bnapi->status_blk.msi,
827                                     bp->status_blk_mapping);
828                 bnapi->status_blk.msi = NULL;
829                 bp->stats_blk = NULL;
830         }
831 }
832
/* Allocate all device memory: a single DMA-coherent region holding the
 * status block(s) followed by the statistics block, the 5709 context
 * pages, and the TX/RX rings.  With MSI-X capability the status area
 * is sized for BNX2_MAX_MSIX_HW_VEC aligned per-vector slices, and
 * each napi instance gets pointers into its own slice.  On any failure
 * everything allocated so far is released via bnx2_free_mem() and
 * -ENOMEM is returned.
 */
833 static int
834 bnx2_alloc_mem(struct bnx2 *bp)
835 {
836         int i, status_blk_size, err;
837         struct bnx2_napi *bnapi;
838         void *status_blk;
839 
840         /* Combine status and statistics blocks into one allocation. */
841         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
842         if (bp->flags & BNX2_FLAG_MSIX_CAP)
843                 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
844                                                  BNX2_SBLK_MSIX_ALIGN_SIZE);
845         bp->status_stats_size = status_blk_size +
846                                 sizeof(struct statistics_block);
847 
848         status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
849                                           &bp->status_blk_mapping);
850         if (status_blk == NULL)
851                 goto alloc_mem_err;
852 
853         memset(status_blk, 0, bp->status_stats_size);
854 
855         bnapi = &bp->bnx2_napi[0];
856         bnapi->status_blk.msi = status_blk;
857         bnapi->hw_tx_cons_ptr =
858                 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
859         bnapi->hw_rx_cons_ptr =
860                 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
861         if (bp->flags & BNX2_FLAG_MSIX_CAP) {
862                 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
863                         struct status_block_msix *sblk;
864 
865                         bnapi = &bp->bnx2_napi[i];
866 
867                         sblk = (void *) (status_blk +
868                                          BNX2_SBLK_MSIX_ALIGN_SIZE * i);
869                         bnapi->status_blk.msix = sblk;
870                         bnapi->hw_tx_cons_ptr =
871                                 &sblk->status_tx_quick_consumer_index;
872                         bnapi->hw_rx_cons_ptr =
873                                 &sblk->status_rx_quick_consumer_index;
874                         bnapi->int_num = i << 24;
875                 }
876         }
877 
878         bp->stats_blk = status_blk + status_blk_size;
879 
880         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
881 
882         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
883                 /* 8KB of context memory, split into BCM_PAGE_SIZE pages */
884                 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
885                 if (bp->ctx_pages == 0)
886                         bp->ctx_pages = 1;
887                 for (i = 0; i < bp->ctx_pages; i++) {
888                         bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
889                                                 BCM_PAGE_SIZE,
890                                                 &bp->ctx_blk_mapping[i]);
891                         if (bp->ctx_blk[i] == NULL)
892                                 goto alloc_mem_err;
893                 }
894         }
895 
896         err = bnx2_alloc_rx_mem(bp);
897         if (err)
898                 goto alloc_mem_err;
899 
900         err = bnx2_alloc_tx_mem(bp);
901         if (err)
902                 goto alloc_mem_err;
903 
904         return 0;
905 
906 alloc_mem_err:
907         bnx2_free_mem(bp);
908         return -ENOMEM;
909 }
909
910 static void
911 bnx2_report_fw_link(struct bnx2 *bp)
912 {
913         u32 fw_link_status = 0;
914
915         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
916                 return;
917
918         if (bp->link_up) {
919                 u32 bmsr;
920
921                 switch (bp->line_speed) {
922                 case SPEED_10:
923                         if (bp->duplex == DUPLEX_HALF)
924                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
925                         else
926                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
927                         break;
928                 case SPEED_100:
929                         if (bp->duplex == DUPLEX_HALF)
930                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
931                         else
932                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
933                         break;
934                 case SPEED_1000:
935                         if (bp->duplex == DUPLEX_HALF)
936                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
937                         else
938                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
939                         break;
940                 case SPEED_2500:
941                         if (bp->duplex == DUPLEX_HALF)
942                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
943                         else
944                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
945                         break;
946                 }
947
948                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
949
950                 if (bp->autoneg) {
951                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
952
953                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
954                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
955
956                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
957                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
958                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
959                         else
960                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
961                 }
962         }
963         else
964                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
965
966         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
967 }
968
969 static char *
970 bnx2_xceiver_str(struct bnx2 *bp)
971 {
972         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
973                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
974                  "Copper"));
975 }
976
977 static void
978 bnx2_report_link(struct bnx2 *bp)
979 {
980         if (bp->link_up) {
981                 netif_carrier_on(bp->dev);
982                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
983                             bnx2_xceiver_str(bp),
984                             bp->line_speed,
985                             bp->duplex == DUPLEX_FULL ? "full" : "half");
986
987                 if (bp->flow_ctrl) {
988                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
989                                 pr_cont(", receive ");
990                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
991                                         pr_cont("& transmit ");
992                         }
993                         else {
994                                 pr_cont(", transmit ");
995                         }
996                         pr_cont("flow control ON");
997                 }
998                 pr_cont("\n");
999         } else {
1000                 netif_carrier_off(bp->dev);
1001                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1002                            bnx2_xceiver_str(bp));
1003         }
1004
1005         bnx2_report_fw_link(bp);
1006 }
1007
1008 static void
1009 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1010 {
1011         u32 local_adv, remote_adv;
1012
1013         bp->flow_ctrl = 0;
1014         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1015                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1016
1017                 if (bp->duplex == DUPLEX_FULL) {
1018                         bp->flow_ctrl = bp->req_flow_ctrl;
1019                 }
1020                 return;
1021         }
1022
1023         if (bp->duplex != DUPLEX_FULL) {
1024                 return;
1025         }
1026
1027         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1028             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1029                 u32 val;
1030
1031                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1032                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1033                         bp->flow_ctrl |= FLOW_CTRL_TX;
1034                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1035                         bp->flow_ctrl |= FLOW_CTRL_RX;
1036                 return;
1037         }
1038
1039         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1040         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1041
1042         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1043                 u32 new_local_adv = 0;
1044                 u32 new_remote_adv = 0;
1045
1046                 if (local_adv & ADVERTISE_1000XPAUSE)
1047                         new_local_adv |= ADVERTISE_PAUSE_CAP;
1048                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1049                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
1050                 if (remote_adv & ADVERTISE_1000XPAUSE)
1051                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
1052                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1053                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1054
1055                 local_adv = new_local_adv;
1056                 remote_adv = new_remote_adv;
1057         }
1058
1059         /* See Table 28B-3 of 802.3ab-1999 spec. */
1060         if (local_adv & ADVERTISE_PAUSE_CAP) {
1061                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1062                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1063                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1064                         }
1065                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1066                                 bp->flow_ctrl = FLOW_CTRL_RX;
1067                         }
1068                 }
1069                 else {
1070                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1071                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1072                         }
1073                 }
1074         }
1075         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1076                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1077                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1078
1079                         bp->flow_ctrl = FLOW_CTRL_TX;
1080                 }
1081         }
1082 }
1083
1084 static int
1085 bnx2_5709s_linkup(struct bnx2 *bp)
1086 {
1087         u32 val, speed;
1088
1089         bp->link_up = 1;
1090
1091         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1092         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1093         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1094
1095         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1096                 bp->line_speed = bp->req_line_speed;
1097                 bp->duplex = bp->req_duplex;
1098                 return 0;
1099         }
1100         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1101         switch (speed) {
1102                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1103                         bp->line_speed = SPEED_10;
1104                         break;
1105                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1106                         bp->line_speed = SPEED_100;
1107                         break;
1108                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1109                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1110                         bp->line_speed = SPEED_1000;
1111                         break;
1112                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1113                         bp->line_speed = SPEED_2500;
1114                         break;
1115         }
1116         if (val & MII_BNX2_GP_TOP_AN_FD)
1117                 bp->duplex = DUPLEX_FULL;
1118         else
1119                 bp->duplex = DUPLEX_HALF;
1120         return 0;
1121 }
1122
1123 static int
1124 bnx2_5708s_linkup(struct bnx2 *bp)
1125 {
1126         u32 val;
1127
1128         bp->link_up = 1;
1129         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1130         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1131                 case BCM5708S_1000X_STAT1_SPEED_10:
1132                         bp->line_speed = SPEED_10;
1133                         break;
1134                 case BCM5708S_1000X_STAT1_SPEED_100:
1135                         bp->line_speed = SPEED_100;
1136                         break;
1137                 case BCM5708S_1000X_STAT1_SPEED_1G:
1138                         bp->line_speed = SPEED_1000;
1139                         break;
1140                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1141                         bp->line_speed = SPEED_2500;
1142                         break;
1143         }
1144         if (val & BCM5708S_1000X_STAT1_FD)
1145                 bp->duplex = DUPLEX_FULL;
1146         else
1147                 bp->duplex = DUPLEX_HALF;
1148
1149         return 0;
1150 }
1151
1152 static int
1153 bnx2_5706s_linkup(struct bnx2 *bp)
1154 {
1155         u32 bmcr, local_adv, remote_adv, common;
1156
1157         bp->link_up = 1;
1158         bp->line_speed = SPEED_1000;
1159
1160         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1161         if (bmcr & BMCR_FULLDPLX) {
1162                 bp->duplex = DUPLEX_FULL;
1163         }
1164         else {
1165                 bp->duplex = DUPLEX_HALF;
1166         }
1167
1168         if (!(bmcr & BMCR_ANENABLE)) {
1169                 return 0;
1170         }
1171
1172         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1173         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1174
1175         common = local_adv & remote_adv;
1176         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1177
1178                 if (common & ADVERTISE_1000XFULL) {
1179                         bp->duplex = DUPLEX_FULL;
1180                 }
1181                 else {
1182                         bp->duplex = DUPLEX_HALF;
1183                 }
1184         }
1185
1186         return 0;
1187 }
1188
1189 static int
1190 bnx2_copper_linkup(struct bnx2 *bp)
1191 {
1192         u32 bmcr;
1193
1194         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1195         if (bmcr & BMCR_ANENABLE) {
1196                 u32 local_adv, remote_adv, common;
1197
1198                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1199                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1200
1201                 common = local_adv & (remote_adv >> 2);
1202                 if (common & ADVERTISE_1000FULL) {
1203                         bp->line_speed = SPEED_1000;
1204                         bp->duplex = DUPLEX_FULL;
1205                 }
1206                 else if (common & ADVERTISE_1000HALF) {
1207                         bp->line_speed = SPEED_1000;
1208                         bp->duplex = DUPLEX_HALF;
1209                 }
1210                 else {
1211                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1212                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1213
1214                         common = local_adv & remote_adv;
1215                         if (common & ADVERTISE_100FULL) {
1216                                 bp->line_speed = SPEED_100;
1217                                 bp->duplex = DUPLEX_FULL;
1218                         }
1219                         else if (common & ADVERTISE_100HALF) {
1220                                 bp->line_speed = SPEED_100;
1221                                 bp->duplex = DUPLEX_HALF;
1222                         }
1223                         else if (common & ADVERTISE_10FULL) {
1224                                 bp->line_speed = SPEED_10;
1225                                 bp->duplex = DUPLEX_FULL;
1226                         }
1227                         else if (common & ADVERTISE_10HALF) {
1228                                 bp->line_speed = SPEED_10;
1229                                 bp->duplex = DUPLEX_HALF;
1230                         }
1231                         else {
1232                                 bp->line_speed = 0;
1233                                 bp->link_up = 0;
1234                         }
1235                 }
1236         }
1237         else {
1238                 if (bmcr & BMCR_SPEED100) {
1239                         bp->line_speed = SPEED_100;
1240                 }
1241                 else {
1242                         bp->line_speed = SPEED_10;
1243                 }
1244                 if (bmcr & BMCR_FULLDPLX) {
1245                         bp->duplex = DUPLEX_FULL;
1246                 }
1247                 else {
1248                         bp->duplex = DUPLEX_HALF;
1249                 }
1250         }
1251
1252         return 0;
1253 }
1254
/* Program the L2 context for one rx ring (cid).  On the 5709 this
 * also computes the buffer-count watermarks that drive hardware flow
 * control.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, rx_cid_addr = GET_CID_ADDR(cid);

        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;       /* NOTE(review): undocumented field at bit 8 — confirm meaning */

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 lo_water, hi_water;

                /* Enable the low watermark only when tx pause frames
                 * were negotiated; it must also fit inside the ring.
                 */
                if (bp->flow_ctrl & FLOW_CTRL_TX)
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
                else
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
                if (lo_water >= bp->rx_ring_size)
                        lo_water = 0;

                hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

                if (hi_water <= lo_water)
                        lo_water = 0;

                /* Scale both marks into the units the context expects. */
                hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
                lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

                /* hi_water is a 4-bit field; a zero high mark disables
                 * the low mark too.
                 */
                if (hi_water > 0xf)
                        hi_water = 0xf;
                else if (hi_water == 0)
                        lo_water = 0;
                val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
        }
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1290
1291 static void
1292 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1293 {
1294         int i;
1295         u32 cid;
1296
1297         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1298                 if (i == 1)
1299                         cid = RX_RSS_CID;
1300                 bnx2_init_rx_context(bp, cid);
1301         }
1302 }
1303
1304 static void
1305 bnx2_set_mac_link(struct bnx2 *bp)
1306 {
1307         u32 val;
1308
1309         REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1310         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1311                 (bp->duplex == DUPLEX_HALF)) {
1312                 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1313         }
1314
1315         /* Configure the EMAC mode register. */
1316         val = REG_RD(bp, BNX2_EMAC_MODE);
1317
1318         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1319                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1320                 BNX2_EMAC_MODE_25G_MODE);
1321
1322         if (bp->link_up) {
1323                 switch (bp->line_speed) {
1324                         case SPEED_10:
1325                                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1326                                         val |= BNX2_EMAC_MODE_PORT_MII_10M;
1327                                         break;
1328                                 }
1329                                 /* fall through */
1330                         case SPEED_100:
1331                                 val |= BNX2_EMAC_MODE_PORT_MII;
1332                                 break;
1333                         case SPEED_2500:
1334                                 val |= BNX2_EMAC_MODE_25G_MODE;
1335                                 /* fall through */
1336                         case SPEED_1000:
1337                                 val |= BNX2_EMAC_MODE_PORT_GMII;
1338                                 break;
1339                 }
1340         }
1341         else {
1342                 val |= BNX2_EMAC_MODE_PORT_GMII;
1343         }
1344
1345         /* Set the MAC to operate in the appropriate duplex mode. */
1346         if (bp->duplex == DUPLEX_HALF)
1347                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1348         REG_WR(bp, BNX2_EMAC_MODE, val);
1349
1350         /* Enable/disable rx PAUSE. */
1351         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1352
1353         if (bp->flow_ctrl & FLOW_CTRL_RX)
1354                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1355         REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1356
1357         /* Enable/disable tx PAUSE. */
1358         val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1359         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1360
1361         if (bp->flow_ctrl & FLOW_CTRL_TX)
1362                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1363         REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1364
1365         /* Acknowledge the interrupt. */
1366         REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1367
1368         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1369                 bnx2_init_all_rx_contexts(bp);
1370 }
1371
1372 static void
1373 bnx2_enable_bmsr1(struct bnx2 *bp)
1374 {
1375         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1376             (CHIP_NUM(bp) == CHIP_NUM_5709))
1377                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1378                                MII_BNX2_BLK_ADDR_GP_STATUS);
1379 }
1380
1381 static void
1382 bnx2_disable_bmsr1(struct bnx2 *bp)
1383 {
1384         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1385             (CHIP_NUM(bp) == CHIP_NUM_5709))
1386                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1387                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1388 }
1389
1390 static int
1391 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1392 {
1393         u32 up1;
1394         int ret = 1;
1395
1396         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1397                 return 0;
1398
1399         if (bp->autoneg & AUTONEG_SPEED)
1400                 bp->advertising |= ADVERTISED_2500baseX_Full;
1401
1402         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1403                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1404
1405         bnx2_read_phy(bp, bp->mii_up1, &up1);
1406         if (!(up1 & BCM5708S_UP1_2G5)) {
1407                 up1 |= BCM5708S_UP1_2G5;
1408                 bnx2_write_phy(bp, bp->mii_up1, up1);
1409                 ret = 0;
1410         }
1411
1412         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1413                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1414                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1415
1416         return ret;
1417 }
1418
1419 static int
1420 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1421 {
1422         u32 up1;
1423         int ret = 0;
1424
1425         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1426                 return 0;
1427
1428         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1429                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1430
1431         bnx2_read_phy(bp, bp->mii_up1, &up1);
1432         if (up1 & BCM5708S_UP1_2G5) {
1433                 up1 &= ~BCM5708S_UP1_2G5;
1434                 bnx2_write_phy(bp, bp->mii_up1, up1);
1435                 ret = 1;
1436         }
1437
1438         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1439                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1440                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1441
1442         return ret;
1443 }
1444
/* Force the SerDes PHY to 2.5 Gbps.  Chip specific: the 5709 uses the
 * SERDES_DIG MISC1 register, the 5708 a BMCR override bit; any other
 * chip is left untouched.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
        u32 bmcr;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                /* Select the SERDES_DIG block, set the force-speed
                 * field to 2.5G, then return to the IEEE block and
                 * fetch BMCR for the common update below.
                 */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
                val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
                val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr |= BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        /* A forced speed is incompatible with autoneg: drop ANENABLE
         * and carry over the requested duplex.
         */
        if (bp->autoneg & AUTONEG_SPEED) {
                bmcr &= ~BMCR_ANENABLE;
                if (bp->req_duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
        }
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1481
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bit and, when autonegotiating, restart autoneg at 1G.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
        u32 bmcr;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                /* Clear the force bit in the SERDES_DIG block, then
                 * return to the IEEE block and fetch BMCR for the
                 * common update below.
                 */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
                val &= ~MII_BNX2_SD_MISC1_FORCE;
                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr &= ~BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        /* Re-enable and restart autoneg at gigabit. */
        if (bp->autoneg & AUTONEG_SPEED)
                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1514
1515 static void
1516 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517 {
1518         u32 val;
1519
1520         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522         if (start)
1523                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524         else
1525                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526 }
1527
/* Poll the PHY and update the software link state (speed, duplex,
 * flow control), then reprogram the MAC to match.  Caller holds
 * bp->phy_lock.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback the link is up by definition. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* Link management belongs to the remote firmware PHY. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        /* The status register is latched; read it twice to get the
         * current state.  On 5709 SerDes it lives in the GP_STATUS
         * block, hence the enable/disable wrappers.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val, an_dbg;

                /* Undo a previous forced-down before re-evaluating. */
                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = REG_RD(bp, BNX2_EMAC_STATUS);

                /* AN_DBG is also latched; read twice. */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                /* On this chip override BMSR with the MAC's link view
                 * combined with the sync status.
                 */
                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Decode speed/duplex with the chip-specific helper. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link lost: leave forced 2.5G mode and resume
                 * autoneg if parallel detect was in use.
                 */
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        /* Only log and notify firmware on an actual state change. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1611
1612 static int
1613 bnx2_reset_phy(struct bnx2 *bp)
1614 {
1615         int i;
1616         u32 reg;
1617
1618         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619
1620 #define PHY_RESET_MAX_WAIT 100
1621         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622                 udelay(10);
1623
1624                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1625                 if (!(reg & BMCR_RESET)) {
1626                         udelay(20);
1627                         break;
1628                 }
1629         }
1630         if (i == PHY_RESET_MAX_WAIT) {
1631                 return -EBUSY;
1632         }
1633         return 0;
1634 }
1635
1636 static u32
1637 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638 {
1639         u32 adv = 0;
1640
1641         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643
1644                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645                         adv = ADVERTISE_1000XPAUSE;
1646                 }
1647                 else {
1648                         adv = ADVERTISE_PAUSE_CAP;
1649                 }
1650         }
1651         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653                         adv = ADVERTISE_1000XPSE_ASYM;
1654                 }
1655                 else {
1656                         adv = ADVERTISE_PAUSE_ASYM;
1657                 }
1658         }
1659         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662                 }
1663                 else {
1664                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665                 }
1666         }
1667         return adv;
1668 }
1669
1670 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671
1672 static int
1673 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1674 __releases(&bp->phy_lock)
1675 __acquires(&bp->phy_lock)
1676 {
1677         u32 speed_arg = 0, pause_adv;
1678
1679         pause_adv = bnx2_phy_get_pause_adv(bp);
1680
1681         if (bp->autoneg & AUTONEG_SPEED) {
1682                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1683                 if (bp->advertising & ADVERTISED_10baseT_Half)
1684                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1685                 if (bp->advertising & ADVERTISED_10baseT_Full)
1686                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1687                 if (bp->advertising & ADVERTISED_100baseT_Half)
1688                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1689                 if (bp->advertising & ADVERTISED_100baseT_Full)
1690                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1691                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1692                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1694                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1695         } else {
1696                 if (bp->req_line_speed == SPEED_2500)
1697                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1698                 else if (bp->req_line_speed == SPEED_1000)
1699                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1700                 else if (bp->req_line_speed == SPEED_100) {
1701                         if (bp->req_duplex == DUPLEX_FULL)
1702                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1703                         else
1704                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1705                 } else if (bp->req_line_speed == SPEED_10) {
1706                         if (bp->req_duplex == DUPLEX_FULL)
1707                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708                         else
1709                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1710                 }
1711         }
1712
1713         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1714                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1715         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1716                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1717
1718         if (port == PORT_TP)
1719                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1720                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1721
1722         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1723
1724         spin_unlock_bh(&bp->phy_lock);
1725         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1726         spin_lock_bh(&bp->phy_lock);
1727
1728         return 0;
1729 }
1730
/* Configure the SerDes PHY according to bp->autoneg / bp->req_*.
 * Called with phy_lock held; the lock is dropped around the msleep()
 * used to force a visible link-down transition.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        /* A firmware-managed (remote) PHY is configured through the
         * firmware mailbox, not by direct MII access.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return (bnx2_setup_remote_phy(bp, port));

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                /* Forced speed path (1G or 2.5G). */
                u32 new_bmcr;
                int force_link_down = 0;

                /* Toggle 2.5G capability to match the requested speed;
                 * a change requires forcing the link down so the peer
                 * notices.
                 */
                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                /* Forced 2.5G is programmed differently per chip. */
                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                /* 0x2000 presumably clears the forced
                                 * speed-select bit on the 5709 --
                                 * inherited magic, confirm against
                                 * Broadcom docs before changing.
                                 */
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Autonegotiation path. */
        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        /* Only restart autoneg when the advertisement changed or
         * autoneg is not currently enabled.
         */
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        /* msleep() can sleep -- release phy_lock. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
1847
/* All fibre link modes to advertise; includes 2.5G only when the PHY
 * reports that capability.  NOTE: expands using a local "bp" variable.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All copper link modes to advertise (ethtool ADVERTISED_* bits). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* 10/100 speed + CSMA bits of the MII advertisement register. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* 1000BASE-T bits of the MII 1000BASE-T control register. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862
1863 static void
1864 bnx2_set_default_remote_link(struct bnx2 *bp)
1865 {
1866         u32 link;
1867
1868         if (bp->phy_port == PORT_TP)
1869                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870         else
1871                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872
1873         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874                 bp->req_line_speed = 0;
1875                 bp->autoneg |= AUTONEG_SPEED;
1876                 bp->advertising = ADVERTISED_Autoneg;
1877                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878                         bp->advertising |= ADVERTISED_10baseT_Half;
1879                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880                         bp->advertising |= ADVERTISED_10baseT_Full;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882                         bp->advertising |= ADVERTISED_100baseT_Half;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884                         bp->advertising |= ADVERTISED_100baseT_Full;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886                         bp->advertising |= ADVERTISED_1000baseT_Full;
1887                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888                         bp->advertising |= ADVERTISED_2500baseX_Full;
1889         } else {
1890                 bp->autoneg = 0;
1891                 bp->advertising = 0;
1892                 bp->req_duplex = DUPLEX_FULL;
1893                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894                         bp->req_line_speed = SPEED_10;
1895                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896                                 bp->req_duplex = DUPLEX_HALF;
1897                 }
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899                         bp->req_line_speed = SPEED_100;
1900                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901                                 bp->req_duplex = DUPLEX_HALF;
1902                 }
1903                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904                         bp->req_line_speed = SPEED_1000;
1905                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906                         bp->req_line_speed = SPEED_2500;
1907         }
1908 }
1909
1910 static void
1911 bnx2_set_default_link(struct bnx2 *bp)
1912 {
1913         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914                 bnx2_set_default_remote_link(bp);
1915                 return;
1916         }
1917
1918         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919         bp->req_line_speed = 0;
1920         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921                 u32 reg;
1922
1923                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924
1925                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928                         bp->autoneg = 0;
1929                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1930                         bp->req_duplex = DUPLEX_FULL;
1931                 }
1932         } else
1933                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934 }
1935
1936 static void
1937 bnx2_send_heart_beat(struct bnx2 *bp)
1938 {
1939         u32 msg;
1940         u32 addr;
1941
1942         spin_lock(&bp->indirect_lock);
1943         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947         spin_unlock(&bp->indirect_lock);
1948 }
1949
/* Handle a link event from the firmware-managed (remote) PHY.  The
 * entire link state is decoded from the BNX2_LINK_STATUS shared-memory
 * word; no MII access is performed here.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;	/* previous state, to detect a change */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        /* The heart-beat-expired bit piggybacks on the link status word;
         * answer it, then strip it before decoding the link state.
         */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each xxxHALF case sets half duplex and then falls
                 * through to share the line_speed assignment of its
                 * xxxFULL sibling.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                /* Resolve flow control: forced settings apply unless both
                 * speed and flow-control autoneg are enabled, in which
                 * case the firmware-reported result is used.
                 */
                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* Media may have switched; reload defaults if it did. */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
2026
2027 static int
2028 bnx2_set_remote_link(struct bnx2 *bp)
2029 {
2030         u32 evt_code;
2031
2032         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2033         switch (evt_code) {
2034                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2035                         bnx2_remote_phy_event(bp);
2036                         break;
2037                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2038                 default:
2039                         bnx2_send_heart_beat(bp);
2040                         break;
2041         }
2042         return 0;
2043 }
2044
/* Configure the copper PHY according to bp->autoneg / bp->req_*.
 * Called with phy_lock held; the lock is dropped around the msleep()
 * used to force a visible link-down transition.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Keep only the speed and pause bits of the current
                 * advertisement registers so the comparison below is
                 * meaningful.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Build the desired advertisement from bp->advertising. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Rewrite and restart autoneg only when something changed
                 * or autoneg is not currently enabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path (10 or 100 Mb on copper). */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR link status is latched; read twice to get the
                 * current state (per IEEE 802.3 clause 22).
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        /* msleep() can sleep -- release phy_lock. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
2143
2144 static int
2145 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2146 __releases(&bp->phy_lock)
2147 __acquires(&bp->phy_lock)
2148 {
2149         if (bp->loopback == MAC_LOOPBACK)
2150                 return 0;
2151
2152         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2153                 return (bnx2_setup_serdes_phy(bp, port));
2154         }
2155         else {
2156                 return (bnx2_setup_copper_phy(bp));
2157         }
2158 }
2159
/* Initialize the 5709 SerDes PHY.  This PHY exposes the standard MII
 * registers at an offset of 0x10, so record the remapped addresses
 * before any generic MII code uses them.  Register access is
 * block-based: MII_BNX2_BLK_ADDR selects the active register block.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Select the autoneg MMD through the AER block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Force fiber mode and disable media auto-detection. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Enable 2.5G in the over-1G block only when capable. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable BAM and T2 next-page exchange during autoneg. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        /* Clause 73 BAM configuration. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address pointing at the IEEE-compatible block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2209
/* Initialize the 5708 SerDes PHY: fiber mode, PLL early-link detect,
 * optional 2.5G advertisement, plus board-specific TX tuning read from
 * shared hardware configuration.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        /* Use IEEE-style register semantics in the DIG3 block. */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        /* Fiber mode with media auto-detect enabled. */
        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Advertise 2.5G only when the hardware supports it. */
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Apply a board-specific TX control value on backplane designs;
         * both values come from shared hardware configuration.
         */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2267
/* Initialize the 5706 SerDes PHY.
 * NOTE(review): registers 0x18/0x1c and the constants written below
 * are undocumented Broadcom expansion/shadow registers inherited from
 * the vendor driver -- do not change without hardware documentation.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                /* Standard MTU: clear the extended-length settings. */
                u32 val;

                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2305
/* Initialize the copper PHY: apply CRC and early-DAC workarounds when
 * flagged, configure extended packet length based on MTU, and enable
 * ethernet@wirespeed.
 * NOTE(review): registers 0x10/0x15/0x17/0x18 and their values are
 * undocumented Broadcom shadow/expansion registers inherited from the
 * vendor driver.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* CRC workaround sequence for affected PHYs. */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Disable early DAC (clear bit 8 of DSP expand register 8). */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Standard MTU: clear the extended-length bits. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2357
2358
2359 static int
2360 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2361 __releases(&bp->phy_lock)
2362 __acquires(&bp->phy_lock)
2363 {
2364         u32 val;
2365         int rc = 0;
2366
2367         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2368         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2369
2370         bp->mii_bmcr = MII_BMCR;
2371         bp->mii_bmsr = MII_BMSR;
2372         bp->mii_bmsr1 = MII_BMSR;
2373         bp->mii_adv = MII_ADVERTISE;
2374         bp->mii_lpa = MII_LPA;
2375
2376         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2377
2378         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2379                 goto setup_phy;
2380
2381         bnx2_read_phy(bp, MII_PHYSID1, &val);
2382         bp->phy_id = val << 16;
2383         bnx2_read_phy(bp, MII_PHYSID2, &val);
2384         bp->phy_id |= val & 0xffff;
2385
2386         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2387                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2388                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2389                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2390                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2391                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2392                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2393         }
2394         else {
2395                 rc = bnx2_init_copper_phy(bp, reset_phy);
2396         }
2397
2398 setup_phy:
2399         if (!rc)
2400                 rc = bnx2_setup_phy(bp, bp->phy_port);
2401
2402         return rc;
2403 }
2404
2405 static int
2406 bnx2_set_mac_loopback(struct bnx2 *bp)
2407 {
2408         u32 mac_mode;
2409
2410         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2411         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2412         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2413         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2414         bp->link_up = 1;
2415         return 0;
2416 }
2417
2418 static int bnx2_test_link(struct bnx2 *);
2419
2420 static int
2421 bnx2_set_phy_loopback(struct bnx2 *bp)
2422 {
2423         u32 mac_mode;
2424         int rc, i;
2425
2426         spin_lock_bh(&bp->phy_lock);
2427         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2428                             BMCR_SPEED1000);
2429         spin_unlock_bh(&bp->phy_lock);
2430         if (rc)
2431                 return rc;
2432
2433         for (i = 0; i < 10; i++) {
2434                 if (bnx2_test_link(bp) == 0)
2435                         break;
2436                 msleep(100);
2437         }
2438
2439         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2440         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2441                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2442                       BNX2_EMAC_MODE_25G_MODE);
2443
2444         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2445         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2446         bp->link_up = 1;
2447         return 0;
2448 }
2449
/* Post a command to the firmware mailbox and optionally wait for the
 * acknowledgement.
 *
 * @msg_data: command word; the driver sequence number is OR'ed in here.
 * @ack:      if zero, fire-and-forget and return 0 immediately.
 * @silent:   suppress the error message on ack timeout.
 *
 * Returns 0 on success (also for WAIT0 messages regardless of ack),
 * -EBUSY if the firmware never acked, -EIO if it acked with a non-OK
 * status.  May sleep (msleep) while polling for the ack.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        /* Tag the message with the next driver sequence number. */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages succeed whether or not the ack arrived. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        pr_err("fw sync timeout, reset code = %x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2494
/* bnx2_init_5709_context - enable the 5709 context memory and program
 * the host page table with the DMA addresses of the context pages.
 *
 * Returns 0 on success, -EBUSY if the chip does not complete memory
 * init or a page-table write request in time, -ENOMEM if a context
 * page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and kick off its memory init; bits
	 * 16+ encode the host page size relative to 256 bytes.
	 * NOTE(review): bit 12 is set without a named constant -
	 * presumably a chip-specific control bit; confirm against the
	 * 5709 programmer's reference.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the hardware to clear the MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Every context page must have been allocated earlier. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit bus address of page i into the host
		 * page table, then wait for the write request to drain.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2542
/* bnx2_init_context - zero all 96 context entries through the context
 * access window (pre-5709 style context init).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 remaps part of the CID space: entries
			 * with bit 3 set map to an alternate physical CID.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context entry spans CTX_SIZE bytes but the window maps
		 * PHY_CTX_SIZE at a time, so step through it piecewise.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2585
2586 static int
2587 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2588 {
2589         u16 *good_mbuf;
2590         u32 good_mbuf_cnt;
2591         u32 val;
2592
2593         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2594         if (good_mbuf == NULL) {
2595                 pr_err("Failed to allocate memory in %s\n", __func__);
2596                 return -ENOMEM;
2597         }
2598
2599         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2600                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2601
2602         good_mbuf_cnt = 0;
2603
2604         /* Allocate a bunch of mbufs and save the good ones in an array. */
2605         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2606         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2607                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2608                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2609
2610                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2611
2612                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2613
2614                 /* The addresses with Bit 9 set are bad memory blocks. */
2615                 if (!(val & (1 << 9))) {
2616                         good_mbuf[good_mbuf_cnt] = (u16) val;
2617                         good_mbuf_cnt++;
2618                 }
2619
2620                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2621         }
2622
2623         /* Free the good ones back to the mbuf pool thus discarding
2624          * all the bad ones. */
2625         while (good_mbuf_cnt) {
2626                 good_mbuf_cnt--;
2627
2628                 val = good_mbuf[good_mbuf_cnt];
2629                 val = (val << 9) | val | 1;
2630
2631                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2632         }
2633         kfree(good_mbuf);
2634         return 0;
2635 }
2636
2637 static void
2638 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2639 {
2640         u32 val;
2641
2642         val = (mac_addr[0] << 8) | mac_addr[1];
2643
2644         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2645
2646         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2647                 (mac_addr[4] << 8) | mac_addr[5];
2648
2649         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2650 }
2651
/* bnx2_alloc_rx_page - allocate a page for slot @index of the rx page
 * ring, DMA-map it, and publish its bus address in the matching rx_bd.
 *
 * Uses GFP_ATOMIC (called from the rx fast path).  Returns 0 on
 * success, -ENOMEM if the page allocation fails, -EIO if the DMA
 * mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Remember the page and its mapping for later unmap/recycle. */
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Split the 64-bit bus address into the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2676
2677 static void
2678 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2679 {
2680         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2681         struct page *page = rx_pg->page;
2682
2683         if (!page)
2684                 return;
2685
2686         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2687                        PCI_DMA_FROMDEVICE);
2688
2689         __free_page(page);
2690         rx_pg->page = NULL;
2691 }
2692
/* bnx2_alloc_rx_skb - allocate and DMA-map a new skb for slot @index
 * of the rx buffer ring and publish its bus address in the matching
 * rx_bd.  On success rx_prod_bseq is advanced by the buffer size.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, -EIO if
 * the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit bus address into the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2727
2728 static int
2729 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2730 {
2731         struct status_block *sblk = bnapi->status_blk.msi;
2732         u32 new_link_state, old_link_state;
2733         int is_set = 1;
2734
2735         new_link_state = sblk->status_attn_bits & event;
2736         old_link_state = sblk->status_attn_bits_ack & event;
2737         if (new_link_state != old_link_state) {
2738                 if (new_link_state)
2739                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2740                 else
2741                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2742         } else
2743                 is_set = 0;
2744
2745         return is_set;
2746 }
2747
/* bnx2_phy_int - service PHY-related attention events under the phy
 * lock: a LINK_STATE change triggers a link update, a TIMER_ABORT
 * event triggers a remote-link update.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2761
/* bnx2_get_hw_tx_cons - read the hardware tx consumer index from the
 * status block.  When the raw index lands on the last descriptor slot
 * of a ring page it is advanced by one - presumably because that slot
 * holds the chain pointer to the next page rather than a packet bd.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2775
/* bnx2_tx_int - reclaim tx buffers completed by the hardware.
 *
 * Walks the tx ring from the software consumer towards the hardware
 * consumer, unmapping and freeing each completed skb, up to @budget
 * packets.  Wakes the matching tx queue when enough descriptors have
 * been freed.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each napi instance services the tx queue with the same index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim once every bd of the packet is past
			 * hw_cons; the signed difference handles 16-bit
			 * index wraparound.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment bd that follows the header bd. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons to pick up newly completed packets. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to avoid racing with xmit. */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2866
/* bnx2_reuse_rx_skb_pages - recycle @count pages from the consumer
 * side of the rx page ring back to the producer side (used when a
 * packet's pages cannot be handed to the stack).
 *
 * When @skb is non-NULL the caller failed to replace the last page in
 * the skb's frags array: that page is taken back out of the skb and
 * recycled too, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page, its DMA mapping and its descriptor
		 * address from the consumer slot to the producer slot.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2922
/* bnx2_reuse_rx_skb - put the rx buffer at @cons back on the ring at
 * @prod so the hardware can reuse it (the packet was copied, errored,
 * or could not be replaced).  The skb, its DMA mapping and descriptor
 * address are handed over to the producer slot unless cons == prod.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area (synced for the CPU earlier) back to the
	 * device before the buffer is reused.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing to transfer. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2952
/* bnx2_rx_skb - finish receiving a packet into @skb.
 *
 * First replenishes the rx slot; if that fails the whole packet
 * (buffer and any ring pages) is recycled and an error is returned.
 * Otherwise the header buffer is unmapped and, for split/jumbo
 * packets (@hdr_len != 0), the remaining data is attached from the
 * page ring as skb fragments, replenishing each page as it is
 * consumed.
 *
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.  Returns 0 on success or a
 * negative errno when an allocation failed (the packet is dropped and
 * its buffers recycled).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* NOTE(review): the extra 4 bytes appear throughout
			 * this path - presumably the frame checksum; confirm.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: all data is in the header buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The last chunk holds only (part of) the 4-byte
			 * trailer: trim it off the skb instead of attaching
			 * a page, and recycle the remaining pages.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				/* Recycle this page (taken back out of the
				 * skb) and all pages not yet consumed.
				 */
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3051
/* bnx2_get_hw_rx_cons - read the hardware rx consumer index from the
 * status block.  When the raw index lands on the last descriptor slot
 * of a ring page it is advanced by one - presumably because that slot
 * holds the chain pointer to the next page rather than a packet bd.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3065
/* bnx2_rx_int - NAPI receive processing, up to @budget packets.
 *
 * For each completed rx descriptor: drop errored frames, copy small
 * frames into a fresh skb, attach ring pages to large/split frames,
 * handle the VLAN tag (hardware accel or manual re-insertion), set
 * the checksum state and hand the skb to the stack.  The updated
 * producer indices are written back to the chip at the end.  Returns
 * the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; the full
		 * buffer is unmapped later if the skb is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with length and status. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Errored frame: recycle the buffer (and any pages). */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing 4 bytes from the reported length. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small frame: copy into a fresh skb and recycle
			 * the original buffer; cheaper than remapping.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group registered: re-insert the
				 * 802.1Q header into the packet by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN (0x8100)
		 * tagged, which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum verification when enabled and
		 * the chip flagged no TCP/UDP checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer positions. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3241
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts from the chip before polling. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3264
/* One-shot MSI ISR - unlike bnx2_msi(), no mask/ack register write is
 * performed here before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3281
/* INTx ISR - may be shared with other devices, so the status block
 * index and the INTA line are checked to see if this interrupt is
 * really ours before acking and scheduling NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts from the chip before polling. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we saw so missed updates can be
	 * detected, then schedule the poll.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3320
3321 static inline int
3322 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3323 {
3324         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3325         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3326
3327         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3328             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3329                 return 1;
3330         return 0;
3331 }
3332
3333 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3334                                  STATUS_ATTN_BITS_TIMER_ABORT)
3335
3336 static inline int
3337 bnx2_has_work(struct bnx2_napi *bnapi)
3338 {
3339         struct status_block *sblk = bnapi->status_blk.msi;
3340
3341         if (bnx2_has_fast_work(bnapi))
3342                 return 1;
3343
3344 #ifdef BCM_CNIC
3345         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3346                 return 1;
3347 #endif
3348
3349         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3350             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3351                 return 1;
3352
3353         return 0;
3354 }
3355
/* Periodic check (driver timer) for a missed MSI: if work is pending
 * and the status index has not advanced since the previous check,
 * bounce the MSI enable bit to re-arm it and invoke the handler by
 * hand so NAPI gets going again.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Stalled: nothing processed since the last idle check. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off and back on, then fake
			 * the interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3377
#ifdef BCM_CNIC
/* Forward the status block to the registered CNIC (offload) driver,
 * if any, and record the status index it consumed in cnic_tag.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops may be unregistered concurrently; RCU protects the
	 * dereference for the duration of the callback.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3394
/* Service link-state attention events reported in the status block. */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An event is pending while raised and acked bits disagree. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3414
3415 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3416                           int work_done, int budget)
3417 {
3418         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3419         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3420
3421         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3422                 bnx2_tx_int(bp, bnapi, 0);
3423
3424         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3425                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3426
3427         return work_done;
3428 }
3429
/* NAPI poll routine for the extra MSI-X vectors: fast-path RX/TX only;
 * link and CNIC events are handled by vector 0's bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {
			/* Done: ack up to last_status_idx and re-enable
			 * this vector's interrupt.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3456
/* NAPI poll routine for vector 0 (INTx/MSI, or MSI-X vector 0): link
 * attentions, RX/TX fast path and CNIC events, re-enabling interrupts
 * only once all work is drained or the budget is spent.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single unmasking ack. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with the line still masked, then
			 * a second write to unmask.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3505
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's RX filtering (promiscuous, multicast hash,
 * unicast match entries, VLAN tag keeping) from dev->flags and the
 * device's address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in the frame only while no vlan group is
	 * registered with the driver.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: saturate the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter
		 * (8 registers x 32 bits), keyed by the low CRC byte.
		 */
		netdev_for_each_mc_addr(mclist, dev) {
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More unicast addresses than match filters: fall back to
	 * promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	/* Only touch the EMAC RX mode register when it actually changes. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the USER0 sort filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3599
3600 static int __devinit
3601 check_fw_section(const struct firmware *fw,
3602                  const struct bnx2_fw_file_section *section,
3603                  u32 alignment, bool non_empty)
3604 {
3605         u32 offset = be32_to_cpu(section->offset);
3606         u32 len = be32_to_cpu(section->len);
3607
3608         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3609                 return -EINVAL;
3610         if ((non_empty && len == 0) || len > fw->size - offset ||
3611             len & (alignment - 1))
3612                 return -EINVAL;
3613         return 0;
3614 }
3615
3616 static int __devinit
3617 check_mips_fw_entry(const struct firmware *fw,
3618                     const struct bnx2_mips_fw_file_entry *entry)
3619 {
3620         if (check_fw_section(fw, &entry->text, 4, true) ||
3621             check_fw_section(fw, &entry->data, 4, false) ||
3622             check_fw_section(fw, &entry->rodata, 4, false))
3623                 return -EINVAL;
3624         return 0;
3625 }
3626
3627 static int __devinit
3628 bnx2_request_firmware(struct bnx2 *bp)
3629 {
3630         const char *mips_fw_file, *rv2p_fw_file;
3631         const struct bnx2_mips_fw_file *mips_fw;
3632         const struct bnx2_rv2p_fw_file *rv2p_fw;
3633         int rc;
3634
3635         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3636                 mips_fw_file = FW_MIPS_FILE_09;
3637                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3638                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3639                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3640                 else
3641                         rv2p_fw_file = FW_RV2P_FILE_09;
3642         } else {
3643                 mips_fw_file = FW_MIPS_FILE_06;
3644                 rv2p_fw_file = FW_RV2P_FILE_06;
3645         }
3646
3647         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3648         if (rc) {
3649                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3650                 return rc;
3651         }
3652
3653         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3654         if (rc) {
3655                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3656                 return rc;
3657         }
3658         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3659         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3660         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3661             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3662             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3663             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3664             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3665             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3666                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3667                 return -EINVAL;
3668         }
3669         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3670             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3671             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3672                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3673                 return -EINVAL;
3674         }
3675
3676         return 0;
3677 }
3678
3679 static u32
3680 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3681 {
3682         switch (idx) {
3683         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3684                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3685                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3686                 break;
3687         }
3688         return rv2p_code;
3689 }
3690
/* Download one RV2P processor firmware image.
 *
 * Each instruction is 64 bits, written as an INSTR_HIGH/INSTR_LOW pair
 * followed by a write command carrying the instruction index.  Up to 8
 * fixup slots then patch individual instructions (e.g. the BD page
 * size).  The processor is left in reset; un-stall happens later.
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command encoding and address register for this
	 * processor.
	 */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Bulk download: one 8-byte instruction per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply fixups: loc is a 32-bit-word index into the image, so
	 * words loc-1/loc are the HIGH/LOW halves and loc/2 is the
	 * instruction index being rewritten.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3750
3751 static int
3752 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3753             const struct bnx2_mips_fw_file_entry *fw_entry)
3754 {
3755         u32 addr, len, file_offset;
3756         __be32 *data;
3757         u32 offset;
3758         u32 val;
3759
3760         /* Halt the CPU. */
3761         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3762         val |= cpu_reg->mode_value_halt;
3763         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3764         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3765
3766         /* Load the Text area. */
3767         addr = be32_to_cpu(fw_entry->text.addr);
3768         len = be32_to_cpu(fw_entry->text.len);
3769         file_offset = be32_to_cpu(fw_entry->text.offset);
3770         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3771
3772         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3773         if (len) {
3774                 int j;
3775
3776                 for (j = 0; j < (len / 4); j++, offset += 4)
3777                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3778         }
3779
3780         /* Load the Data area. */
3781         addr = be32_to_cpu(fw_entry->data.addr);
3782         len = be32_to_cpu(fw_entry->data.len);
3783         file_offset = be32_to_cpu(fw_entry->data.offset);
3784         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3785
3786         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3787         if (len) {
3788                 int j;
3789
3790                 for (j = 0; j < (len / 4); j++, offset += 4)
3791                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3792         }
3793
3794         /* Load the Read-Only area. */
3795         addr = be32_to_cpu(fw_entry->rodata.addr);
3796         len = be32_to_cpu(fw_entry->rodata.len);
3797         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3798         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3799
3800         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3801         if (len) {
3802                 int j;
3803
3804                 for (j = 0; j < (len / 4); j++, offset += 4)
3805                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3806         }
3807
3808         /* Clear the pre-fetch instruction. */
3809         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3810
3811         val = be32_to_cpu(fw_entry->start_addr);
3812         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3813
3814         /* Start the CPU. */
3815         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3816         val &= ~cpu_reg->mode_value_halt;
3817         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3818         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3819
3820         return 0;
3821 }
3822
3823 static int
3824 bnx2_init_cpus(struct bnx2 *bp)
3825 {
3826         const struct bnx2_mips_fw_file *mips_fw =
3827                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3828         const struct bnx2_rv2p_fw_file *rv2p_fw =
3829                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3830         int rc;
3831
3832         /* Initialize the RV2P processor. */
3833         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3834         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3835
3836         /* Initialize the RX Processor. */
3837         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3838         if (rc)
3839                 goto init_cpu_err;
3840
3841         /* Initialize the TX Processor. */
3842         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3843         if (rc)
3844                 goto init_cpu_err;
3845
3846         /* Initialize the TX Patch-up Processor. */
3847         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3848         if (rc)
3849                 goto init_cpu_err;
3850
3851         /* Initialize the Completion Processor. */
3852         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3853         if (rc)
3854                 goto init_cpu_err;
3855
3856         /* Initialize the Command Processor. */
3857         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3858
3859 init_cpu_err:
3860         return rc;
3861 }
3862
/* Move the chip between PCI power states.
 *
 * D0: clear PME status, wait out the mandatory D3hot exit delay if we
 * were in a low-power state, and undo the WoL EMAC/RPM settings.
 * D3hot: optionally set the chip up for Wake-on-LAN (forced 10/100
 * autoneg on copper, magic/ACPI packet reception, all-multicast),
 * notify the bootcode, then write the new state into PMCSR.
 * Returns 0, or -EINVAL for any other target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Return to D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the WoL configuration: clear received-wakeup
		 * status and disable magic packet mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* On copper, renegotiate down to 10/100 for the
			 * low-power link, restoring the user settings in
			 * bp afterwards.
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter state D3hot (3) when WoL
			 * is on; otherwise remain in D0.
			 * NOTE(review): presumably a chip erratum — confirm.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4000
4001 static int
4002 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4003 {
4004         u32 val;
4005         int j;
4006
4007         /* Request access to the flash interface. */
4008         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4009         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4010                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4011                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4012                         break;
4013
4014                 udelay(5);
4015         }
4016
4017         if (j >= NVRAM_TIMEOUT_COUNT)
4018                 return -EBUSY;
4019
4020         return 0;
4021 }
4022
4023 static int
4024 bnx2_release_nvram_lock(struct bnx2 *bp)
4025 {
4026         int j;
4027         u32 val;
4028
4029         /* Relinquish nvram interface. */
4030         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4031
4032         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4033                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4034                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4035                         break;
4036
4037                 udelay(5);
4038         }
4039
4040         if (j >= NVRAM_TIMEOUT_COUNT)
4041                 return -EBUSY;
4042
4043         return 0;
4044 }
4045
4046
/* Allow writes to the flash: set the PCI-side write-enable bit and,
 * for parts that require it (BNX2_NV_WREN), issue a WREN command to
 * the flash and poll for completion.
 * Returns 0 on success, -EBUSY if the WREN command times out.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE first, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4075
4076 static void
4077 bnx2_disable_nvram_write(struct bnx2 *bp)
4078 {
4079         u32 val;
4080
4081         val = REG_RD(bp, BNX2_MISC_CFG);
4082         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4083 }
4084
4085
4086 static void
4087 bnx2_enable_nvram_access(struct bnx2 *bp)
4088 {
4089         u32 val;
4090
4091         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4092         /* Enable both bits, even on read. */
4093         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4094                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4095 }
4096
4097 static void
4098 bnx2_disable_nvram_access(struct bnx2 *bp)
4099 {
4100         u32 val;
4101
4102         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4103         /* Disable both bits, even after read. */
4104         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4105                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4106                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4107 }
4108
/* Erase the flash page containing @offset.  A no-op on buffered flash
 * parts, which need no erase before write.
 * Returns 0 on success, -EBUSY if the erase command times out.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4148
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes).
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/_LAST framing bits so the
 * caller can stream several dwords as a single flash transaction.
 * Returns 0 on success or -EBUSY if the controller never reports DONE.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Linear offset -> page-number/page-offset address. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Copy the word out byte-wise in big-endian order. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4192
4193
4194 static int
4195 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4196 {
4197         u32 cmd;
4198         __be32 val32;
4199         int j;
4200
4201         /* Build the command word. */
4202         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4203
4204         /* Calculate an offset of a buffered flash, not needed for 5709. */
4205         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4206                 offset = ((offset / bp->flash_info->page_size) <<
4207                           bp->flash_info->page_bits) +
4208                          (offset % bp->flash_info->page_size);
4209         }
4210
4211         /* Need to clear DONE bit separately. */
4212         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4213
4214         memcpy(&val32, val, 4);
4215
4216         /* Write the data. */
4217         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4218
4219         /* Address of the NVRAM to write to. */
4220         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4221
4222         /* Issue the write command. */
4223         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4224
4225         /* Wait for completion. */
4226         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4227                 udelay(5);
4228
4229                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4230                         break;
4231         }
4232         if (j >= NVRAM_TIMEOUT_COUNT)
4233                 return -EBUSY;
4234
4235         return 0;
4236 }
4237
/* Identify the attached flash/EEPROM part, record it in bp->flash_info
 * and determine the usable flash size.
 *
 * The 5709 has a fixed NVRAM interface, so the table lookup is skipped.
 * On older chips the strapping pins (or the already-reconfigured
 * NVM_CFG1 value) are matched against flash_table[]; when the interface
 * has not been reconfigured yet, the matching entry's config values are
 * programmed into the controller.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match against the config1 value already programmed. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping pins. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size reported in shared HW config; fall back
	 * to the flash table's total size when it is not set. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4320
4321 static int
4322 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4323                 int buf_size)
4324 {
4325         int rc = 0;
4326         u32 cmd_flags, offset32, len32, extra;
4327
4328         if (buf_size == 0)
4329                 return 0;
4330
4331         /* Request access to the flash interface. */
4332         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4333                 return rc;
4334
4335         /* Enable access to flash interface */
4336         bnx2_enable_nvram_access(bp);
4337
4338         len32 = buf_size;
4339         offset32 = offset;
4340         extra = 0;
4341
4342         cmd_flags = 0;
4343
4344         if (offset32 & 3) {
4345                 u8 buf[4];
4346                 u32 pre_len;
4347
4348                 offset32 &= ~3;
4349                 pre_len = 4 - (offset & 3);
4350
4351                 if (pre_len >= len32) {
4352                         pre_len = len32;
4353                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4354                                     BNX2_NVM_COMMAND_LAST;
4355                 }
4356                 else {
4357                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4358                 }
4359
4360                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4361
4362                 if (rc)
4363                         return rc;
4364
4365                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4366
4367                 offset32 += 4;
4368                 ret_buf += pre_len;
4369                 len32 -= pre_len;
4370         }
4371         if (len32 & 3) {
4372                 extra = 4 - (len32 & 3);
4373                 len32 = (len32 + 4) & ~3;
4374         }
4375
4376         if (len32 == 4) {
4377                 u8 buf[4];
4378
4379                 if (cmd_flags)
4380                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4381                 else
4382                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4383                                     BNX2_NVM_COMMAND_LAST;
4384
4385                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4386
4387                 memcpy(ret_buf, buf, 4 - extra);
4388         }
4389         else if (len32 > 0) {
4390                 u8 buf[4];
4391
4392                 /* Read the first word. */
4393                 if (cmd_flags)
4394                         cmd_flags = 0;
4395                 else
4396                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4397
4398                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4399
4400                 /* Advance to the next dword. */
4401                 offset32 += 4;
4402                 ret_buf += 4;
4403                 len32 -= 4;
4404
4405                 while (len32 > 4 && rc == 0) {
4406                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4407
4408                         /* Advance to the next dword. */
4409                         offset32 += 4;
4410                         ret_buf += 4;
4411                         len32 -= 4;
4412                 }
4413
4414                 if (rc)
4415                         return rc;
4416
4417                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4418                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4419
4420                 memcpy(ret_buf, buf, 4 - extra);
4421         }
4422
4423         /* Disable access to flash interface */
4424         bnx2_disable_nvram_access(bp);
4425
4426         bnx2_release_nvram_lock(bp);
4427
4428         return rc;
4429 }
4430
/* Write an arbitrary byte range to NVRAM.
 *
 * Unaligned head/tail dwords are read-modify-written via a temporary
 * aligned buffer.  For non-buffered flash parts each affected page is
 * read into a scratch buffer, erased, and written back with the new
 * data spliced in.
 *
 * NOTE(review): error paths taken after bnx2_acquire_nvram_lock()
 * inside the page loop jump straight to nvram_write_end without
 * releasing the lock or disabling NVRAM access — this looks like a
 * hardware-lock leak on failure; confirm before relying on recovery
 * from failed writes.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the dword containing the first bytes. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: fetch the dword containing the last bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned copy: preserved head bytes + new data +
	 * preserved tail bytes. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer for one flash page; 264 bytes covers the
		 * largest page size used here — presumably the maximum in
		 * flash_table; TODO confirm. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST flag on the final dword of the transaction. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4610
/* Read optional firmware capabilities advertised in shared memory
 * (VLAN keep and remote PHY) and acknowledge the ones the driver
 * will use.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware, VLANs may always be kept. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	/* No signature: firmware does not implement the capability mailbox. */
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	/* Remote PHY: on SerDes parts the firmware may manage the PHY
	 * and report link state through shared memory. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Acknowledge the capabilities we will use. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4650
/* Point separate GRC windows at the MSI-X vector table and PBA so they
 * are reachable through the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4659
/* Reset the chip in coordination with the bootcode firmware.
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason reported to the
 * firmware before (WAIT0) and after (WAIT1) the reset.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via MISC_COMMAND; the config register only
		 * needs the window/swap settings restored afterwards. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the PHY port may have changed
	 * across the reset on remote-PHY setups. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset cleared the GRC windows; remap the MSI-X table. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4765
/* Bring the chip from reset to an operational state: DMA config,
 * context memory, on-chip CPUs, MAC address, MTU, status/statistics
 * block DMA addresses and host-coalescing parameters.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff timer from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* The rx buffer config is sized for at least a standard frame. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the host status blocks and per-vector indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip counts and tick timers (int/non-int pairs). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Coalescing parameters for the additional MSI-X vectors. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware the driver is done resetting/initializing. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4990
4991 static void
4992 bnx2_clear_ring_states(struct bnx2 *bp)
4993 {
4994         struct bnx2_napi *bnapi;
4995         struct bnx2_tx_ring_info *txr;
4996         struct bnx2_rx_ring_info *rxr;
4997         int i;
4998
4999         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5000                 bnapi = &bp->bnx2_napi[i];
5001                 txr = &bnapi->tx_ring;
5002                 rxr = &bnapi->rx_ring;
5003
5004                 txr->tx_cons = 0;
5005                 txr->hw_tx_cons = 0;
5006                 rxr->rx_prod_bseq = 0;
5007                 rxr->rx_prod = 0;
5008                 rxr->rx_cons = 0;
5009                 rxr->rx_pg_prod = 0;
5010                 rxr->rx_pg_cons = 0;
5011         }
5012 }
5013
/* Program the TX L2 context for @cid: context type/size, command
 * type, and the 64-bit host DMA address of the TX BD ring.  The 5709
 * uses different context offsets from the older 5706/5708 chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* Xinan (5709) context layout */
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* NOTE(review): the (8 << 16) field appears to be a parameter of
	 * the L2 command type; confirm against the context layout in
	 * bnx2.h before changing.
	 */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* High and low 32 bits of the TX BD ring's DMA address. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5043
5044 static void
5045 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5046 {
5047         struct tx_bd *txbd;
5048         u32 cid = TX_CID;
5049         struct bnx2_napi *bnapi;
5050         struct bnx2_tx_ring_info *txr;
5051
5052         bnapi = &bp->bnx2_napi[ring_num];
5053         txr = &bnapi->tx_ring;
5054
5055         if (ring_num == 0)
5056                 cid = TX_CID;
5057         else
5058                 cid = TX_TSS_CID + ring_num - 1;
5059
5060         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5061
5062         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5063
5064         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5065         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5066
5067         txr->tx_prod = 0;
5068         txr->tx_prod_bseq = 0;
5069
5070         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5071         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5072
5073         bnx2_init_tx_context(bp, cid, txr);
5074 }
5075
5076 static void
5077 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5078                      int num_rings)
5079 {
5080         int i;
5081         struct rx_bd *rxbd;
5082
5083         for (i = 0; i < num_rings; i++) {
5084                 int j;
5085
5086                 rxbd = &rx_ring[i][0];
5087                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5088                         rxbd->rx_bd_len = buf_size;
5089                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5090                 }
5091                 if (i == (num_rings - 1))
5092                         j = 0;
5093                 else
5094                         j = i + 1;
5095                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5096                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5097         }
5098 }
5099
/* Set up RX ring @ring_num: chain the BD pages, program the L2
 * context with the ring DMA addresses, and pre-fill the ring (and the
 * page ring, when jumbo page buffers are configured) with receive
 * buffers.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; RSS rings get consecutive CIDs
	 * starting at RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the RX BD pages together. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring; overwritten below for jumbo buffers. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* DMA address of the first page-BD ring page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* DMA address of the first RX BD ring page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is logged but tolerated. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal RX ring with skbs. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used to post producer updates at runtime. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5185
/* Reset ring state and initialize every TX and RX ring.  With more
 * than one ring in a direction, also program the TSS configuration
 * and the RSS indirection table plus hash types.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS: number of extra TX rings and their base CID. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;	/* byte view of the word */

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table one byte per entry, each
		 * entry selecting one of the non-default RX rings; flush
		 * the accumulated word (big-endian) every 4 entries.
		 * NOTE(review): the word is written at byte offset i
		 * (i.e. 4k + 3) into the scratch table — confirm the RXP
		 * firmware expects this addressing.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Hash on all IPv4 and IPv6 RSS types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5230
5231 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5232 {
5233         u32 max, num_rings = 1;
5234
5235         while (ring_size > MAX_RX_DESC_CNT) {
5236                 ring_size -= MAX_RX_DESC_CNT;
5237                 num_rings++;
5238         }
5239         /* round to next power of 2 */
5240         max = max_size;
5241         while ((max & num_rings) == 0)
5242                 max >>= 1;
5243
5244         if (num_rings != max)
5245                 max <<= 1;
5246
5247         return max;
5248 }
5249
/* Compute the RX buffer geometry for a ring of @size entries at the
 * current MTU: buffer sizes, copy-break threshold, and the number of
 * BD pages for both the normal RX ring and (for frames too large for
 * a single page) the jumbo page ring.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Full skb footprint if the whole frame lived in one buffer. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Frame does not fit in one page: use a small first
		 * buffer plus PAGE_SIZE buffers from the page ring.
		 * NOTE(review): "- 40" presumably excludes IP + TCP
		 * header bytes kept in the first buffer — confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;	/* always copy-free in jumbo mode */
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5288
/* Free every skb still held in the TX rings and unmap its DMA
 * buffers (head + page fragments).  Called while quiescing/resetting
 * the NIC.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring never allocated (device not opened): skip it. */
		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* First BD maps the linear head of the skb. */
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Subsequent BDs map the page fragments.
			 * NOTE(review): j advances with plain ++ while the
			 * buffer is looked up via TX_RING_IDX(j) — confirm
			 * this stays in step with how bnx2_start_xmit
			 * placed frags across ring-page boundaries.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					pci_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5332
5333 static void
5334 bnx2_free_rx_skbs(struct bnx2 *bp)
5335 {
5336         int i;
5337
5338         for (i = 0; i < bp->num_rx_rings; i++) {
5339                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5340                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5341                 int j;
5342
5343                 if (rxr->rx_buf_ring == NULL)
5344                         return;
5345
5346                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5347                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5348                         struct sk_buff *skb = rx_buf->skb;
5349
5350                         if (skb == NULL)
5351                                 continue;
5352
5353                         pci_unmap_single(bp->pdev,
5354                                          pci_unmap_addr(rx_buf, mapping),
5355                                          bp->rx_buf_use_size,
5356                                          PCI_DMA_FROMDEVICE);
5357
5358                         rx_buf->skb = NULL;
5359
5360                         dev_kfree_skb(skb);
5361                 }
5362                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5363                         bnx2_free_rx_page(bp, rxr, j);
5364         }
5365 }
5366
/* Release all buffers still owned by the driver: TX first, then RX. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5373
5374 static int
5375 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5376 {
5377         int rc;
5378
5379         rc = bnx2_reset_chip(bp, reset_code);
5380         bnx2_free_skbs(bp);
5381         if (rc)
5382                 return rc;
5383
5384         if ((rc = bnx2_init_chip(bp)) != 0)
5385                 return rc;
5386
5387         bnx2_init_all_rings(bp);
5388         return 0;
5389 }
5390
5391 static int
5392 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5393 {
5394         int rc;
5395
5396         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5397                 return rc;
5398
5399         spin_lock_bh(&bp->phy_lock);
5400         bnx2_init_phy(bp, reset_phy);
5401         bnx2_set_link(bp);
5402         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5403                 bnx2_remote_phy_event(bp);
5404         spin_unlock_bh(&bp->phy_lock);
5405         return 0;
5406 }
5407
5408 static int
5409 bnx2_shutdown_chip(struct bnx2 *bp)
5410 {
5411         u32 reset_code;
5412
5413         if (bp->flags & BNX2_FLAG_NO_WOL)
5414                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5415         else if (bp->wol)
5416                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5417         else
5418                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5419
5420         return bnx2_reset_chip(bp, reset_code);
5421 }
5422
/* Ethtool register self-test: for each entry in reg_tbl, check that
 * every read/write bit (rw_mask) can be cleared and set, and that
 * every read-only bit (ro_mask) survives both writes unchanged.  The
 * original register value is restored in all cases.  Returns 0 on
 * success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1	/* entry does not apply to 5709 */
		u32   rw_mask;		/* bits that must be writable */
		u32   ro_mask;		/* bits that must be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all 0s: rw bits must read back 0 and ro bits
		 * must keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all 1s: rw bits must read back 1 and ro bits
		 * must still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5593
5594 static int
5595 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5596 {
5597         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5598                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5599         int i;
5600
5601         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5602                 u32 offset;
5603
5604                 for (offset = 0; offset < size; offset += 4) {
5605
5606                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5607
5608                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5609                                 test_pattern[i]) {
5610                                 return -ENODEV;
5611                         }
5612                 }
5613         }
5614         return 0;
5615 }
5616
5617 static int
5618 bnx2_test_memory(struct bnx2 *bp)
5619 {
5620         int ret = 0;
5621         int i;
5622         static struct mem_entry {
5623                 u32   offset;
5624                 u32   len;
5625         } mem_tbl_5706[] = {
5626                 { 0x60000,  0x4000 },
5627                 { 0xa0000,  0x3000 },
5628                 { 0xe0000,  0x4000 },
5629                 { 0x120000, 0x4000 },
5630                 { 0x1a0000, 0x4000 },
5631                 { 0x160000, 0x4000 },
5632                 { 0xffffffff, 0    },
5633         },
5634         mem_tbl_5709[] = {
5635                 { 0x60000,  0x4000 },
5636                 { 0xa0000,  0x3000 },
5637                 { 0xe0000,  0x4000 },
5638                 { 0x120000, 0x4000 },
5639                 { 0x1a0000, 0x4000 },
5640                 { 0xffffffff, 0    },
5641         };
5642         struct mem_entry *mem_tbl;
5643
5644         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5645                 mem_tbl = mem_tbl_5709;
5646         else
5647                 mem_tbl = mem_tbl_5706;
5648
5649         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5650                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5651                         mem_tbl[i].len)) != 0) {
5652                         return ret;
5653                 }
5654         }
5655
5656         return ret;
5657 }
5658
5659 #define BNX2_MAC_LOOPBACK       0
5660 #define BNX2_PHY_LOOPBACK       1
5661
/* Send one self-addressed test frame with MAC or PHY loopback enabled
 * and verify it returns intact on RX ring 0.  Returns 0 on success
 * (or for PHY loopback with a remote PHY, which cannot be tested),
 * -EINVAL for an unknown mode, -ENOMEM/-EIO on allocation or DMA
 * mapping failure, and -ENODEV when the frame is lost or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* Cannot loop through a remotely-managed PHY. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: own MAC as destination, zeroed
	 * source/type fields, then an incrementing byte pattern.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalescing pass so the RX consumer index is current
	 * before we snapshot it.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single TX BD covering the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Another coalescing pass to pick up the looped-back frame. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX side must have consumed the frame... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts must have arrived on the RX side. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged as bad. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length (minus the 4-byte CRC) must match what was sent... */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* ...and so must the payload pattern. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5793
/* Bit flags reported by bnx2_test_loopback(); a nonzero return is the OR
 * of the individual loopback modes that failed.
 */
#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
                                         BNX2_PHY_LOOPBACK_FAILED)
5798
5799 static int
5800 bnx2_test_loopback(struct bnx2 *bp)
5801 {
5802         int rc = 0;
5803
5804         if (!netif_running(bp->dev))
5805                 return BNX2_LOOPBACK_FAILED;
5806
5807         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5808         spin_lock_bh(&bp->phy_lock);
5809         bnx2_init_phy(bp, 1);
5810         spin_unlock_bh(&bp->phy_lock);
5811         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5812                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5813         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5814                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5815         return rc;
5816 }
5817
/* Size of the NVRAM region validated by bnx2_test_nvram(), and the
 * expected CRC-32 residual of a block whose trailing CRC is correct.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
5820
5821 static int
5822 bnx2_test_nvram(struct bnx2 *bp)
5823 {
5824         __be32 buf[NVRAM_SIZE / 4];
5825         u8 *data = (u8 *) buf;
5826         int rc = 0;
5827         u32 magic, csum;
5828
5829         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5830                 goto test_nvram_done;
5831
5832         magic = be32_to_cpu(buf[0]);
5833         if (magic != 0x669955aa) {
5834                 rc = -ENODEV;
5835                 goto test_nvram_done;
5836         }
5837
5838         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5839                 goto test_nvram_done;
5840
5841         csum = ether_crc_le(0x100, data);
5842         if (csum != CRC32_RESIDUAL) {
5843                 rc = -ENODEV;
5844                 goto test_nvram_done;
5845         }
5846
5847         csum = ether_crc_le(0x100, data + 0x100);
5848         if (csum != CRC32_RESIDUAL) {
5849                 rc = -ENODEV;
5850         }
5851
5852 test_nvram_done:
5853         return rc;
5854 }
5855
5856 static int
5857 bnx2_test_link(struct bnx2 *bp)
5858 {
5859         u32 bmsr;
5860
5861         if (!netif_running(bp->dev))
5862                 return -ENODEV;
5863
5864         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5865                 if (bp->link_up)
5866                         return 0;
5867                 return -ENODEV;
5868         }
5869         spin_lock_bh(&bp->phy_lock);
5870         bnx2_enable_bmsr1(bp);
5871         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5872         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5873         bnx2_disable_bmsr1(bp);
5874         spin_unlock_bh(&bp->phy_lock);
5875
5876         if (bmsr & BMSR_LSTATUS) {
5877                 return 0;
5878         }
5879         return -ENODEV;
5880 }
5881
5882 static int
5883 bnx2_test_intr(struct bnx2 *bp)
5884 {
5885         int i;
5886         u16 status_idx;
5887
5888         if (!netif_running(bp->dev))
5889                 return -ENODEV;
5890
5891         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5892
5893         /* This register is not touched during run-time. */
5894         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5895         REG_RD(bp, BNX2_HC_COMMAND);
5896
5897         for (i = 0; i < 10; i++) {
5898                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5899                         status_idx) {
5900
5901                         break;
5902                 }
5903
5904                 msleep_interruptible(10);
5905         }
5906         if (i < 10)
5907                 return 0;
5908
5909         return -ENODEV;
5910 }
5911
/* Determining link for parallel detection.
 *
 * Returns 1 if the 5706 serdes appears to have a link partner that is
 * not autonegotiating (signal detected, in sync, not receiving CONFIG
 * code words); 0 otherwise or when parallel detection is disabled.
 * Each check selects a shadow/expansion register and then reads it back.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled for this configuration. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select and read the MODE_CTL shadow; need signal detect. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Select and read the AN_DBG shadow.  NOTE(review): read twice
	 * and the second value used — presumably to refresh latched
	 * status bits; confirm against the PHY datasheet.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* No link if out of sync or receiving invalid RUDI. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Select and read expansion register 1 (double read as above). */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5943
/* Periodic serdes maintenance for the 5706.
 *
 * Handles parallel detection: when autoneg is on but no link comes up
 * and the partner is not autonegotiating, force 1G full duplex; when a
 * parallel-detected link later sees the partner autonegotiating again,
 * re-enable autoneg.  Also checks sync status and forces the link down
 * (or re-evaluates it) when sync is lost.  Runs under bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently restarted; give it time to settle and
		 * skip the sync check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* No link via autoneg: if the partner is up but
			 * not negotiating, force 1000/full (parallel
			 * detection) and remember we did so.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link was parallel-detected; poll register 0x15 (via
		 * 0x17 select) and re-enable autoneg if the partner is
		 * now sending autoneg (bit 0x20 set).
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Read AN_DBG shadow (twice; second value used) to see
		 * whether the serdes has lost sync.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link claims up but we lost sync: force the link
			 * down once, then let bnx2_set_link() re-evaluate
			 * on subsequent ticks.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6005
/* Periodic serdes maintenance for the 5708.
 *
 * When autoneg fails to bring the link up, alternate between forced
 * 2.5G mode and autoneg (with a 2-tick settle period) until a link is
 * established.  No-op for remote-PHY configurations; 2.5G-incapable
 * PHYs only clear the pending counter.  PHY access under bp->phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Recently toggled modes; give the link time to settle. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg hasn't linked: try forced 2.5G, polling
			 * again on the shorter forced-mode timeout.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G hasn't linked either: fall back to
			 * autoneg and wait two ticks before re-checking.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6038
6039 static void
6040 bnx2_timer(unsigned long data)
6041 {
6042         struct bnx2 *bp = (struct bnx2 *) data;
6043
6044         if (!netif_running(bp->dev))
6045                 return;
6046
6047         if (atomic_read(&bp->intr_sem) != 0)
6048                 goto bnx2_restart_timer;
6049
6050         if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==