Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define PFX DRV_MODULE_NAME     ": "
62 #define DRV_MODULE_VERSION      "2.0.3"
63 #define DRV_MODULE_RELDATE      "Dec 03, 2009"
64 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j3.fw"
65 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
66 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j3.fw"
67 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
68 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
69
70 #define RUN_AT(x) (jiffies + (x))
71
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74
75 static char version[] __devinitdata =
76         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87
88 static int disable_msi = 0;
89
90 module_param(disable_msi, int, 0);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92
/* Board types; used as the driver_data value in bnx2_pci_tbl and as the
 * index into board_info[] below, so the three must stay in sync. */
93 typedef enum {
94         BCM5706 = 0,
95         NC370T,
96         NC370I,
97         BCM5706S,
98         NC370F,
99         BCM5708,
100         BCM5708S,
101         BCM5709,
102         BCM5709S,
103         BCM5716,
104         BCM5716S,
105 } board_t;
106
107 /* indexed by board_t, above */
/* Marketing names, one entry per board_t value in the same order. */
108 static struct {
109         char *name;
110 } board_info[] __devinitdata = {
111         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
112         { "HP NC370T Multifunction Gigabit Server Adapter" },
113         { "HP NC370i Multifunction Gigabit Server Adapter" },
114         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
115         { "HP NC370F Multifunction Gigabit Server Adapter" },
116         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
117         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
118         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
119         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
120         { "Broadcom NetXtreme II BCM5716 1000Base-T" },
121         { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
122         };
123
/* PCI ID table.  The HP OEM entries match on subsystem vendor/device and
 * must come before the PCI_ANY_ID wildcard entries for the same chip, since
 * the PCI core picks the first match.  driver_data is a board_t index. */
124 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
125         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
127         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
129         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
130           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
131         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
132           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
133         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
135         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
136           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
137         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
138           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
139         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
140           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
141         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
142           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
143         { PCI_VENDOR_ID_BROADCOM, 0x163b,
144           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
145         { PCI_VENDOR_ID_BROADCOM, 0x163c,
146           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
147         { 0, }
148 };
149
/* NVRAM device table: access parameters for each supported EEPROM/flash
 * part.  NOTE(review): the raw u32 fields are presumably strapping and
 * NVRAM config register values matched at init time by the nvram code
 * (not visible in this chunk) -- confirm before editing any entry. */
150 static const struct flash_spec flash_table[] =
151 {
152 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
153 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
154         /* Slow EEPROM */
155         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
156          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
157          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
158          "EEPROM - slow"},
159         /* Expansion entry 0001 */
160         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
161          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163          "Entry 0001"},
164         /* Saifun SA25F010 (non-buffered flash) */
165         /* strap, cfg1, & write1 need updates */
166         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
167          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
168          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
169          "Non-buffered flash (128kB)"},
170         /* Saifun SA25F020 (non-buffered flash) */
171         /* strap, cfg1, & write1 need updates */
172         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
173          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
175          "Non-buffered flash (256kB)"},
176         /* Expansion entry 0100 */
177         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
178          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180          "Entry 0100"},
181         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
182         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
183          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
184          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
185          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
186         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
187         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
188          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
189          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
190          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
191         /* Saifun SA25F005 (non-buffered flash) */
192         /* strap, cfg1, & write1 need updates */
193         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
194          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
196          "Non-buffered flash (64kB)"},
197         /* Fast EEPROM */
198         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
199          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
200          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
201          "EEPROM - fast"},
202         /* Expansion entry 1001 */
203         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
204          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
205          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206          "Entry 1001"},
207         /* Expansion entry 1010 */
208         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
209          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
210          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
211          "Entry 1010"},
212         /* ATMEL AT45DB011B (buffered flash) */
213         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
214          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
215          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
216          "Buffered flash (128kB)"},
217         /* Expansion entry 1100 */
218         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
219          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
220          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
221          "Entry 1100"},
222         /* Expansion entry 1101 */
223         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
224          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
225          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226          "Entry 1101"},
227         /* Ateml Expansion entry 1110 */
228         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
229          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
230          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
231          "Entry 1110 (Atmel)"},
232         /* ATMEL AT45DB021B (buffered flash) */
233         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
234          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
235          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
236          "Buffered flash (256kB)"},
237 };
238
/* The 5709 family has a single fixed buffered-flash layout, so it uses
 * this dedicated descriptor instead of a flash_table[] lookup. */
239 static const struct flash_spec flash_5709 = {
240         .flags          = BNX2_NV_BUFFERED,
241         .page_bits      = BCM5709_FLASH_PAGE_BITS,
242         .page_size      = BCM5709_FLASH_PAGE_SIZE,
243         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
244         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
245         .name           = "5709 Buffered flash (256kB)",
246 };
247
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249
/* Return the number of free tx descriptors in @txr.  The memory barrier
 * ensures tx_prod and tx_cons are re-read here rather than using stale
 * values cached before index updates made on another CPU became visible. */
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         smp_mb();
255
256         /* The ring uses 256 indices for 255 entries, one of them
257          * needs to be skipped.
258          */
259         diff = txr->tx_prod - txr->tx_cons;
        /* Indices are u16-style values; mask the 32-bit subtraction and
         * fold the skipped index back out of the count. */
260         if (unlikely(diff >= TX_DESC_CNT)) {
261                 diff &= 0xffff;
262                 if (diff == TX_DESC_CNT)
263                         diff = MAX_TX_DESC_CNT;
264         }
265         return (bp->tx_ring_size - diff);
266 }
267
/* Indirect register read: program the PCI config window address, then
 * read the window data register.  indirect_lock serializes the two-step
 * sequence against other indirect accesses (BH-safe). */
268 static u32
269 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
270 {
271         u32 val;
272
273         spin_lock_bh(&bp->indirect_lock);
274         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
275         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
276         spin_unlock_bh(&bp->indirect_lock);
277         return val;
278 }
279
/* Indirect register write: program the PCI config window address, then
 * write the data.  Must hold indirect_lock across both steps so another
 * context cannot retarget the window in between. */
280 static void
281 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
282 {
283         spin_lock_bh(&bp->indirect_lock);
284         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
285         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
286         spin_unlock_bh(&bp->indirect_lock);
287 }
288
289 static void
290 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
291 {
292         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
293 }
294
295 static u32
296 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
297 {
298         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
299 }
300
/* Write @val into on-chip context memory at @cid_addr + @offset.
 * 5709: post the value/address and poll (up to 5 x 5us) for the
 * WRITE_REQ bit to clear; a timeout is silently ignored.
 * Older chips: plain address/data register pair. */
301 static void
302 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
303 {
304         offset += cid_addr;
305         spin_lock_bh(&bp->indirect_lock);
306         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
307                 int i;
308
309                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
310                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
311                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
312                 for (i = 0; i < 5; i++) {
                        /* val is reused here as a scratch for the poll. */
313                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
314                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
315                                 break;
316                         udelay(5);
317                 }
318         } else {
319                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
320                 REG_WR(bp, BNX2_CTX_DATA, val);
321         }
322         spin_unlock_bh(&bp->indirect_lock);
323 }
324
325 #ifdef BCM_CNIC
326 static int
327 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
328 {
329         struct bnx2 *bp = netdev_priv(dev);
330         struct drv_ctl_io *io = &info->data.io;
331
332         switch (info->cmd) {
333         case DRV_CTL_IO_WR_CMD:
334                 bnx2_reg_wr_ind(bp, io->offset, io->data);
335                 break;
336         case DRV_CTL_IO_RD_CMD:
337                 io->data = bnx2_reg_rd_ind(bp, io->offset);
338                 break;
339         case DRV_CTL_CTX_WR_CMD:
340                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
341                 break;
342         default:
343                 return -EINVAL;
344         }
345         return 0;
346 }
347
/* Fill in the cnic_eth_dev irq fields for the cnic driver.  With MSI-X,
 * cnic gets its own vector (the one past the net driver's irq_nvecs) and
 * does not share our status-block polling; otherwise it shares vector 0
 * and polls via cnic_tag/cnic_present on bnapi[0]. */
348 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
349 {
350         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
351         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
352         int sb_id;
353
354         if (bp->flags & BNX2_FLAG_USING_MSIX) {
355                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
356                 bnapi->cnic_present = 0;
357                 sb_id = bp->irq_nvecs;
358                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
359         } else {
360                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
361                 bnapi->cnic_tag = bnapi->last_status_idx;
362                 bnapi->cnic_present = 1;
363                 sb_id = 0;
364                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
365         }
366
        /* Point cnic at the status block carved out for its vector. */
367         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
368         cp->irq_arr[0].status_blk = (void *)
369                 ((unsigned long) bnapi->status_blk.msi +
370                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
371         cp->irq_arr[0].status_blk_num = sb_id;
372         cp->num_irq = 1;
373 }
374
/* Callback for the cnic driver to register its ops with us.
 * Returns -EINVAL on NULL ops, -EBUSY if already registered.
 * NOTE(review): unlike bnx2_unregister_cnic() this does not take
 * cnic_lock around the drv_state test-and-set; presumably registration
 * is serialized by the cnic module itself -- confirm. */
375 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
376                               void *data)
377 {
378         struct bnx2 *bp = netdev_priv(dev);
379         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
380
381         if (ops == NULL)
382                 return -EINVAL;
383
384         if (cp->drv_state & CNIC_DRV_STATE_REGD)
385                 return -EBUSY;
386
387         bp->cnic_data = data;
        /* Publish ops with RCU semantics; readers use rcu_dereference. */
388         rcu_assign_pointer(bp->cnic_ops, ops);
389
390         cp->num_irq = 0;
391         cp->drv_state = CNIC_DRV_STATE_REGD;
392
393         bnx2_setup_cnic_irq_info(bp);
394
395         return 0;
396 }
397
/* Callback for the cnic driver to unregister.  State is torn down under
 * cnic_lock, then synchronize_rcu() waits for any RCU readers still using
 * the old cnic_ops pointer before we return.  Always returns 0. */
398 static int bnx2_unregister_cnic(struct net_device *dev)
399 {
400         struct bnx2 *bp = netdev_priv(dev);
401         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
402         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
403
404         mutex_lock(&bp->cnic_lock);
405         cp->drv_state = 0;
406         bnapi->cnic_present = 0;
407         rcu_assign_pointer(bp->cnic_ops, NULL);
408         mutex_unlock(&bp->cnic_lock);
409         synchronize_rcu();
410         return 0;
411 }
412
/* Entry point for the cnic driver: expose this device's cnic_eth_dev,
 * populated with our hardware handles and the register/unregister/ctl
 * callbacks above.  Exported so cnic can find us at module load. */
413 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
414 {
415         struct bnx2 *bp = netdev_priv(dev);
416         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
417
418         cp->drv_owner = THIS_MODULE;
419         cp->chip_id = bp->chip_id;
420         cp->pdev = bp->pdev;
421         cp->io_base = bp->regview;
422         cp->drv_ctl = bnx2_drv_ctl;
423         cp->drv_register_cnic = bnx2_register_cnic;
424         cp->drv_unregister_cnic = bnx2_unregister_cnic;
425
426         return cp;
427 }
428 EXPORT_SYMBOL(bnx2_cnic_probe);
429
/* Tell a registered cnic driver (if any) to stop.  cnic_lock keeps
 * cnic_ops stable for the duration of the callback. */
430 static void
431 bnx2_cnic_stop(struct bnx2 *bp)
432 {
433         struct cnic_ops *c_ops;
434         struct cnic_ctl_info info;
435
436         mutex_lock(&bp->cnic_lock);
437         c_ops = bp->cnic_ops;
438         if (c_ops) {
439                 info.cmd = CNIC_CTL_STOP_CMD;
440                 c_ops->cnic_ctl(bp->cnic_data, &info);
441         }
442         mutex_unlock(&bp->cnic_lock);
443 }
444
/* Tell a registered cnic driver (if any) to start.  In shared-vector
 * (non-MSI-X) mode, resync cnic_tag with the current status index first
 * so cnic does not process stale events. */
445 static void
446 bnx2_cnic_start(struct bnx2 *bp)
447 {
448         struct cnic_ops *c_ops;
449         struct cnic_ctl_info info;
450
451         mutex_lock(&bp->cnic_lock);
452         c_ops = bp->cnic_ops;
453         if (c_ops) {
454                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
455                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
456
457                         bnapi->cnic_tag = bnapi->last_status_idx;
458                 }
459                 info.cmd = CNIC_CTL_START_CMD;
460                 c_ops->cnic_ctl(bp->cnic_data, &info);
461         }
462         mutex_unlock(&bp->cnic_lock);
463 }
464
465 #else
466
/* CNIC support not compiled in: provide no-op stubs so callers need no
 * #ifdef BCM_CNIC guards. */
467 static void
468 bnx2_cnic_stop(struct bnx2 *bp)
469 {
470 }
471
472 static void
473 bnx2_cnic_start(struct bnx2 *bp)
474 {
475 }
476
477 #endif
478
/* Read PHY register @reg over MDIO into *@val.
 * If hardware autopolling is active it must be turned off first (and
 * restored afterwards) so our manual MDIO transaction does not collide
 * with the autopoll state machine.  The command is polled for completion
 * up to 50 x 10us; on timeout *val is zeroed and -EBUSY is returned. */
479 static int
480 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
481 {
482         u32 val1;
483         int i, ret;
484
485         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
486                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
487                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
488
489                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back, presumably to flush the posted write before
                 * the settling delay. */
490                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
491
492                 udelay(40);
493         }
494
        /* Build and fire the MDIO read command for phy_addr/reg. */
495         val1 = (bp->phy_addr << 21) | (reg << 16) |
496                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
497                 BNX2_EMAC_MDIO_COMM_START_BUSY;
498         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
499
500         for (i = 0; i < 50; i++) {
501                 udelay(10);
502
503                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
504                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
505                         udelay(5);
506
                        /* Re-read to pick up the data field, then keep
                         * only the 16-bit MDIO data. */
507                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
508                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
509
510                         break;
511                 }
512         }
513
514         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
515                 *val = 0x0;
516                 ret = -EBUSY;
517         }
518         else {
519                 *val = val1;
520                 ret = 0;
521         }
522
        /* Restore autopolling if we disabled it above. */
523         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
524                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
525                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
526
527                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
528                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
529
530                 udelay(40);
531         }
532
533         return ret;
534 }
535
/* Write @val to PHY register @reg over MDIO.  Mirrors bnx2_read_phy():
 * autopolling is suspended around the manual transaction, the command is
 * polled for completion up to 50 x 10us, and -EBUSY is returned if the
 * BUSY bit never clears. */
536 static int
537 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
538 {
539         u32 val1;
540         int i, ret;
541
542         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
543                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
544                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
545
546                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
547                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
548
549                 udelay(40);
550         }
551
        /* Build and fire the MDIO write command; val occupies the low
         * 16 data bits of the command word. */
552         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
553                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
554                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
555         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
556
557         for (i = 0; i < 50; i++) {
558                 udelay(10);
559
560                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
561                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
562                         udelay(5);
563                         break;
564                 }
565         }
566
567         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
568                 ret = -EBUSY;
569         else
570                 ret = 0;
571
        /* Restore autopolling if we disabled it above. */
572         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
573                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
574                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
575
576                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
577                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
578
579                 udelay(40);
580         }
581
582         return ret;
583 }
584
/* Mask hardware interrupts on every active vector.  The trailing read
 * is presumably to flush the posted writes to the device -- standard
 * PCI practice. */
585 static void
586 bnx2_disable_int(struct bnx2 *bp)
587 {
588         int i;
589         struct bnx2_napi *bnapi;
590
591         for (i = 0; i < bp->irq_nvecs; i++) {
592                 bnapi = &bp->bnx2_napi[i];
593                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
594                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
595         }
596         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
597 }
598
/* Unmask interrupts on every active vector.  Each vector gets two
 * writes: first ack the current status index with MASK_INT still set,
 * then repeat without MASK_INT to actually unmask.  The final COAL_NOW
 * command forces the host coalescing block to run so any events that
 * arrived while masked generate an interrupt immediately. */
599 static void
600 bnx2_enable_int(struct bnx2 *bp)
601 {
602         int i;
603         struct bnx2_napi *bnapi;
604
605         for (i = 0; i < bp->irq_nvecs; i++) {
606                 bnapi = &bp->bnx2_napi[i];
607
608                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
609                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
610                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
611                        bnapi->last_status_idx);
612
613                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
614                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
615                        bnapi->last_status_idx);
616         }
617         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
618 }
619
/* Disable interrupts and wait for any in-flight handlers to finish.
 * intr_sem is raised first so the ISR path backs off even before the
 * hardware mask takes effect; it is lowered again by bnx2_netif_start().
 * Note intr_sem is incremented even when the device is not running. */
620 static void
621 bnx2_disable_int_sync(struct bnx2 *bp)
622 {
623         int i;
624
625         atomic_inc(&bp->intr_sem);
626         if (!netif_running(bp->dev))
627                 return;
628
629         bnx2_disable_int(bp);
630         for (i = 0; i < bp->irq_nvecs; i++)
631                 synchronize_irq(bp->irq_tbl[i].vector);
632 }
633
634 static void
635 bnx2_napi_disable(struct bnx2 *bp)
636 {
637         int i;
638
639         for (i = 0; i < bp->irq_nvecs; i++)
640                 napi_disable(&bp->bnx2_napi[i].napi);
641 }
642
643 static void
644 bnx2_napi_enable(struct bnx2 *bp)
645 {
646         int i;
647
648         for (i = 0; i < bp->irq_nvecs; i++)
649                 napi_enable(&bp->bnx2_napi[i].napi);
650 }
651
/* Quiesce the device: stop cnic first, then interrupts, then NAPI and
 * the tx queues.  Counterpart of bnx2_netif_start(); note the start path
 * brings things back in roughly the reverse order. */
652 static void
653 bnx2_netif_stop(struct bnx2 *bp)
654 {
655         bnx2_cnic_stop(bp);
656         bnx2_disable_int_sync(bp);
657         if (netif_running(bp->dev)) {
658                 bnx2_napi_disable(bp);
659                 netif_tx_disable(bp->dev);
660                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
661         }
662 }
663
/* Undo bnx2_netif_stop().  intr_sem counts nested stops; only the call
 * that drops it to zero actually restarts the queues, NAPI, interrupts
 * and cnic. */
664 static void
665 bnx2_netif_start(struct bnx2 *bp)
666 {
667         if (atomic_dec_and_test(&bp->intr_sem)) {
668                 if (netif_running(bp->dev)) {
669                         netif_tx_wake_all_queues(bp->dev);
670                         bnx2_napi_enable(bp);
671                         bnx2_enable_int(bp);
672                         bnx2_cnic_start(bp);
673                 }
674         }
675 }
676
677 static void
678 bnx2_free_tx_mem(struct bnx2 *bp)
679 {
680         int i;
681
682         for (i = 0; i < bp->num_tx_rings; i++) {
683                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
684                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
685
686                 if (txr->tx_desc_ring) {
687                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
688                                             txr->tx_desc_ring,
689                                             txr->tx_desc_mapping);
690                         txr->tx_desc_ring = NULL;
691                 }
692                 kfree(txr->tx_buf_ring);
693                 txr->tx_buf_ring = NULL;
694         }
695 }
696
/* Release all rx ring memory: per-page DMA-coherent descriptor rings for
 * both the normal rx ring and the page ring, plus the vmalloc'ed software
 * buffer rings.  Safe on partially-allocated state (NULL checks / vfree
 * of NULL), which bnx2_alloc_mem()'s error path relies on. */
697 static void
698 bnx2_free_rx_mem(struct bnx2 *bp)
699 {
700         int i;
701
702         for (i = 0; i < bp->num_rx_rings; i++) {
703                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
704                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
705                 int j;
706
707                 for (j = 0; j < bp->rx_max_ring; j++) {
708                         if (rxr->rx_desc_ring[j])
709                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
710                                                     rxr->rx_desc_ring[j],
711                                                     rxr->rx_desc_mapping[j]);
712                         rxr->rx_desc_ring[j] = NULL;
713                 }
714                 vfree(rxr->rx_buf_ring);
715                 rxr->rx_buf_ring = NULL;
716
717                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
718                         if (rxr->rx_pg_desc_ring[j])
719                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
720                                                     rxr->rx_pg_desc_ring[j],
721                                                     rxr->rx_pg_desc_mapping[j]);
722                         rxr->rx_pg_desc_ring[j] = NULL;
723                 }
724                 vfree(rxr->rx_pg_ring);
725                 rxr->rx_pg_ring = NULL;
726         }
727 }
728
729 static int
730 bnx2_alloc_tx_mem(struct bnx2 *bp)
731 {
732         int i;
733
734         for (i = 0; i < bp->num_tx_rings; i++) {
735                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
736                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
737
738                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
739                 if (txr->tx_buf_ring == NULL)
740                         return -ENOMEM;
741
742                 txr->tx_desc_ring =
743                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
744                                              &txr->tx_desc_mapping);
745                 if (txr->tx_desc_ring == NULL)
746                         return -ENOMEM;
747         }
748         return 0;
749 }
750
/* Allocate rx ring memory for every rx ring: a vmalloc'ed (then zeroed)
 * software buffer ring, per-page DMA descriptor rings, and -- only when
 * jumbo pages are in use (rx_pg_ring_size != 0) -- the page ring.
 * Returns 0 or -ENOMEM; partial allocations are left in place for
 * bnx2_free_mem() to clean up. */
751 static int
752 bnx2_alloc_rx_mem(struct bnx2 *bp)
753 {
754         int i;
755
756         for (i = 0; i < bp->num_rx_rings; i++) {
757                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
758                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
759                 int j;
760
761                 rxr->rx_buf_ring =
762                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
763                 if (rxr->rx_buf_ring == NULL)
764                         return -ENOMEM;
765
766                 memset(rxr->rx_buf_ring, 0,
767                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
768
769                 for (j = 0; j < bp->rx_max_ring; j++) {
770                         rxr->rx_desc_ring[j] =
771                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
772                                                      &rxr->rx_desc_mapping[j]);
773                         if (rxr->rx_desc_ring[j] == NULL)
774                                 return -ENOMEM;
775
776                 }
777
778                 if (bp->rx_pg_ring_size) {
779                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
780                                                   bp->rx_max_pg_ring);
781                         if (rxr->rx_pg_ring == NULL)
782                                 return -ENOMEM;
783
784                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
785                                bp->rx_max_pg_ring);
786                 }
787
                /* rx_max_pg_ring is 0 when pages are unused, so this
                 * loop is a no-op in that case. */
788                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
789                         rxr->rx_pg_desc_ring[j] =
790                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
791                                                 &rxr->rx_pg_desc_mapping[j]);
792                         if (rxr->rx_pg_desc_ring[j] == NULL)
793                                 return -ENOMEM;
794
795                 }
796         }
797         return 0;
798 }
799
/* Release everything bnx2_alloc_mem() obtained: tx/rx rings, the 5709
 * context pages, and the combined status+statistics DMA block (which is
 * one allocation -- freeing it also invalidates stats_blk). */
800 static void
801 bnx2_free_mem(struct bnx2 *bp)
802 {
803         int i;
804         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
805
806         bnx2_free_tx_mem(bp);
807         bnx2_free_rx_mem(bp);
808
809         for (i = 0; i < bp->ctx_pages; i++) {
810                 if (bp->ctx_blk[i]) {
811                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
812                                             bp->ctx_blk[i],
813                                             bp->ctx_blk_mapping[i]);
814                         bp->ctx_blk[i] = NULL;
815                 }
816         }
817         if (bnapi->status_blk.msi) {
818                 pci_free_consistent(bp->pdev, bp->status_stats_size,
819                                     bnapi->status_blk.msi,
820                                     bp->status_blk_mapping);
821                 bnapi->status_blk.msi = NULL;
822                 bp->stats_blk = NULL;
823         }
824 }
825
/* Allocate all device memory: one DMA block holding the status block(s)
 * plus the statistics block, context pages on 5709, and the rx/tx rings.
 * Any failure unwinds everything via bnx2_free_mem() and returns -ENOMEM. */
826 static int
827 bnx2_alloc_mem(struct bnx2 *bp)
828 {
829         int i, status_blk_size, err;
830         struct bnx2_napi *bnapi;
831         void *status_blk;
832
833         /* Combine status and statistics blocks into one allocation. */
834         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        /* With MSI-X, reserve one aligned slot per hardware vector. */
835         if (bp->flags & BNX2_FLAG_MSIX_CAP)
836                 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
837                                                  BNX2_SBLK_MSIX_ALIGN_SIZE);
838         bp->status_stats_size = status_blk_size +
839                                 sizeof(struct statistics_block);
840
841         status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
842                                           &bp->status_blk_mapping);
843         if (status_blk == NULL)
844                 goto alloc_mem_err;
845
846         memset(status_blk, 0, bp->status_stats_size);
847
        /* Vector 0 uses the base status block ... */
848         bnapi = &bp->bnx2_napi[0];
849         bnapi->status_blk.msi = status_blk;
850         bnapi->hw_tx_cons_ptr =
851                 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
852         bnapi->hw_rx_cons_ptr =
853                 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        /* ... while each additional MSI-X vector gets its own slot carved
         * out of the same allocation at BNX2_SBLK_MSIX_ALIGN_SIZE strides. */
854         if (bp->flags & BNX2_FLAG_MSIX_CAP) {
855                 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
856                         struct status_block_msix *sblk;
857
858                         bnapi = &bp->bnx2_napi[i];
859
860                         sblk = (void *) (status_blk +
861                                          BNX2_SBLK_MSIX_ALIGN_SIZE * i);
862                         bnapi->status_blk.msix = sblk;
863                         bnapi->hw_tx_cons_ptr =
864                                 &sblk->status_tx_quick_consumer_index;
865                         bnapi->hw_rx_cons_ptr =
866                                 &sblk->status_rx_quick_consumer_index;
867                         bnapi->int_num = i << 24;
868                 }
869         }
870
        /* Statistics live directly after the status block(s). */
871         bp->stats_blk = status_blk + status_blk_size;
872
873         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
874
        /* 5709 needs host-resident context memory (8kB total). */
875         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
876                 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
877                 if (bp->ctx_pages == 0)
878                         bp->ctx_pages = 1;
879                 for (i = 0; i < bp->ctx_pages; i++) {
880                         bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
881                                                 BCM_PAGE_SIZE,
882                                                 &bp->ctx_blk_mapping[i]);
883                         if (bp->ctx_blk[i] == NULL)
884                                 goto alloc_mem_err;
885                 }
886         }
887
888         err = bnx2_alloc_rx_mem(bp);
889         if (err)
890                 goto alloc_mem_err;
891
892         err = bnx2_alloc_tx_mem(bp);
893         if (err)
894                 goto alloc_mem_err;
895
896         return 0;
897
898 alloc_mem_err:
899         bnx2_free_mem(bp);
900         return -ENOMEM;
901 }
902
903 static void
904 bnx2_report_fw_link(struct bnx2 *bp)
905 {
906         u32 fw_link_status = 0;
907
908         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
909                 return;
910
911         if (bp->link_up) {
912                 u32 bmsr;
913
914                 switch (bp->line_speed) {
915                 case SPEED_10:
916                         if (bp->duplex == DUPLEX_HALF)
917                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
918                         else
919                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
920                         break;
921                 case SPEED_100:
922                         if (bp->duplex == DUPLEX_HALF)
923                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
924                         else
925                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
926                         break;
927                 case SPEED_1000:
928                         if (bp->duplex == DUPLEX_HALF)
929                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
930                         else
931                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
932                         break;
933                 case SPEED_2500:
934                         if (bp->duplex == DUPLEX_HALF)
935                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
936                         else
937                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
938                         break;
939                 }
940
941                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
942
943                 if (bp->autoneg) {
944                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
945
946                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
947                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
948
949                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
950                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
951                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
952                         else
953                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
954                 }
955         }
956         else
957                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
958
959         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
960 }
961
962 static char *
963 bnx2_xceiver_str(struct bnx2 *bp)
964 {
965         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
966                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
967                  "Copper"));
968 }
969
970 static void
971 bnx2_report_link(struct bnx2 *bp)
972 {
973         if (bp->link_up) {
974                 netif_carrier_on(bp->dev);
975                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
976                        bnx2_xceiver_str(bp));
977
978                 printk("%d Mbps ", bp->line_speed);
979
980                 if (bp->duplex == DUPLEX_FULL)
981                         printk("full duplex");
982                 else
983                         printk("half duplex");
984
985                 if (bp->flow_ctrl) {
986                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
987                                 printk(", receive ");
988                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
989                                         printk("& transmit ");
990                         }
991                         else {
992                                 printk(", transmit ");
993                         }
994                         printk("flow control ON");
995                 }
996                 printk("\n");
997         }
998         else {
999                 netif_carrier_off(bp->dev);
1000                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
1001                        bnx2_xceiver_str(bp));
1002         }
1003
1004         bnx2_report_fw_link(bp);
1005 }
1006
1007 static void
1008 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1009 {
1010         u32 local_adv, remote_adv;
1011
1012         bp->flow_ctrl = 0;
1013         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1014                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1015
1016                 if (bp->duplex == DUPLEX_FULL) {
1017                         bp->flow_ctrl = bp->req_flow_ctrl;
1018                 }
1019                 return;
1020         }
1021
1022         if (bp->duplex != DUPLEX_FULL) {
1023                 return;
1024         }
1025
1026         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1027             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1028                 u32 val;
1029
1030                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1031                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1032                         bp->flow_ctrl |= FLOW_CTRL_TX;
1033                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1034                         bp->flow_ctrl |= FLOW_CTRL_RX;
1035                 return;
1036         }
1037
1038         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1039         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1040
1041         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1042                 u32 new_local_adv = 0;
1043                 u32 new_remote_adv = 0;
1044
1045                 if (local_adv & ADVERTISE_1000XPAUSE)
1046                         new_local_adv |= ADVERTISE_PAUSE_CAP;
1047                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1048                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
1049                 if (remote_adv & ADVERTISE_1000XPAUSE)
1050                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
1051                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1052                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1053
1054                 local_adv = new_local_adv;
1055                 remote_adv = new_remote_adv;
1056         }
1057
1058         /* See Table 28B-3 of 802.3ab-1999 spec. */
1059         if (local_adv & ADVERTISE_PAUSE_CAP) {
1060                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1061                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1062                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1063                         }
1064                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1065                                 bp->flow_ctrl = FLOW_CTRL_RX;
1066                         }
1067                 }
1068                 else {
1069                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1070                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1071                         }
1072                 }
1073         }
1074         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1075                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1076                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1077
1078                         bp->flow_ctrl = FLOW_CTRL_TX;
1079                 }
1080         }
1081 }
1082
1083 static int
1084 bnx2_5709s_linkup(struct bnx2 *bp)
1085 {
1086         u32 val, speed;
1087
1088         bp->link_up = 1;
1089
1090         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1091         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1092         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1095                 bp->line_speed = bp->req_line_speed;
1096                 bp->duplex = bp->req_duplex;
1097                 return 0;
1098         }
1099         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1100         switch (speed) {
1101                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1102                         bp->line_speed = SPEED_10;
1103                         break;
1104                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1105                         bp->line_speed = SPEED_100;
1106                         break;
1107                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1108                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1109                         bp->line_speed = SPEED_1000;
1110                         break;
1111                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1112                         bp->line_speed = SPEED_2500;
1113                         break;
1114         }
1115         if (val & MII_BNX2_GP_TOP_AN_FD)
1116                 bp->duplex = DUPLEX_FULL;
1117         else
1118                 bp->duplex = DUPLEX_HALF;
1119         return 0;
1120 }
1121
1122 static int
1123 bnx2_5708s_linkup(struct bnx2 *bp)
1124 {
1125         u32 val;
1126
1127         bp->link_up = 1;
1128         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1129         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1130                 case BCM5708S_1000X_STAT1_SPEED_10:
1131                         bp->line_speed = SPEED_10;
1132                         break;
1133                 case BCM5708S_1000X_STAT1_SPEED_100:
1134                         bp->line_speed = SPEED_100;
1135                         break;
1136                 case BCM5708S_1000X_STAT1_SPEED_1G:
1137                         bp->line_speed = SPEED_1000;
1138                         break;
1139                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1140                         bp->line_speed = SPEED_2500;
1141                         break;
1142         }
1143         if (val & BCM5708S_1000X_STAT1_FD)
1144                 bp->duplex = DUPLEX_FULL;
1145         else
1146                 bp->duplex = DUPLEX_HALF;
1147
1148         return 0;
1149 }
1150
1151 static int
1152 bnx2_5706s_linkup(struct bnx2 *bp)
1153 {
1154         u32 bmcr, local_adv, remote_adv, common;
1155
1156         bp->link_up = 1;
1157         bp->line_speed = SPEED_1000;
1158
1159         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1160         if (bmcr & BMCR_FULLDPLX) {
1161                 bp->duplex = DUPLEX_FULL;
1162         }
1163         else {
1164                 bp->duplex = DUPLEX_HALF;
1165         }
1166
1167         if (!(bmcr & BMCR_ANENABLE)) {
1168                 return 0;
1169         }
1170
1171         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1172         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1173
1174         common = local_adv & remote_adv;
1175         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1176
1177                 if (common & ADVERTISE_1000XFULL) {
1178                         bp->duplex = DUPLEX_FULL;
1179                 }
1180                 else {
1181                         bp->duplex = DUPLEX_HALF;
1182                 }
1183         }
1184
1185         return 0;
1186 }
1187
1188 static int
1189 bnx2_copper_linkup(struct bnx2 *bp)
1190 {
1191         u32 bmcr;
1192
1193         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1194         if (bmcr & BMCR_ANENABLE) {
1195                 u32 local_adv, remote_adv, common;
1196
1197                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1198                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1199
1200                 common = local_adv & (remote_adv >> 2);
1201                 if (common & ADVERTISE_1000FULL) {
1202                         bp->line_speed = SPEED_1000;
1203                         bp->duplex = DUPLEX_FULL;
1204                 }
1205                 else if (common & ADVERTISE_1000HALF) {
1206                         bp->line_speed = SPEED_1000;
1207                         bp->duplex = DUPLEX_HALF;
1208                 }
1209                 else {
1210                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1211                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1212
1213                         common = local_adv & remote_adv;
1214                         if (common & ADVERTISE_100FULL) {
1215                                 bp->line_speed = SPEED_100;
1216                                 bp->duplex = DUPLEX_FULL;
1217                         }
1218                         else if (common & ADVERTISE_100HALF) {
1219                                 bp->line_speed = SPEED_100;
1220                                 bp->duplex = DUPLEX_HALF;
1221                         }
1222                         else if (common & ADVERTISE_10FULL) {
1223                                 bp->line_speed = SPEED_10;
1224                                 bp->duplex = DUPLEX_FULL;
1225                         }
1226                         else if (common & ADVERTISE_10HALF) {
1227                                 bp->line_speed = SPEED_10;
1228                                 bp->duplex = DUPLEX_HALF;
1229                         }
1230                         else {
1231                                 bp->line_speed = 0;
1232                                 bp->link_up = 0;
1233                         }
1234                 }
1235         }
1236         else {
1237                 if (bmcr & BMCR_SPEED100) {
1238                         bp->line_speed = SPEED_100;
1239                 }
1240                 else {
1241                         bp->line_speed = SPEED_10;
1242                 }
1243                 if (bmcr & BMCR_FULLDPLX) {
1244                         bp->duplex = DUPLEX_FULL;
1245                 }
1246                 else {
1247                         bp->duplex = DUPLEX_HALF;
1248                 }
1249         }
1250
1251         return 0;
1252 }
1253
1254 static void
1255 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1256 {
1257         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1258
1259         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1260         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1261         val |= 0x02 << 8;
1262
1263         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1264                 u32 lo_water, hi_water;
1265
1266                 if (bp->flow_ctrl & FLOW_CTRL_TX)
1267                         lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1268                 else
1269                         lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1270                 if (lo_water >= bp->rx_ring_size)
1271                         lo_water = 0;
1272
1273                 hi_water = bp->rx_ring_size / 4;
1274
1275                 if (hi_water <= lo_water)
1276                         lo_water = 0;
1277
1278                 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1279                 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1280
1281                 if (hi_water > 0xf)
1282                         hi_water = 0xf;
1283                 else if (hi_water == 0)
1284                         lo_water = 0;
1285                 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1286         }
1287         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1288 }
1289
1290 static void
1291 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1292 {
1293         int i;
1294         u32 cid;
1295
1296         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1297                 if (i == 1)
1298                         cid = RX_RSS_CID;
1299                 bnx2_init_rx_context(bp, cid);
1300         }
1301 }
1302
/* Program the EMAC to match the resolved link speed, duplex and pause. */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default IPG/slot timing; 1G half duplex needs a longer setting. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; use MII. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port parked in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 water marks depend on flow control; reprogram contexts. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1370
1371 static void
1372 bnx2_enable_bmsr1(struct bnx2 *bp)
1373 {
1374         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1375             (CHIP_NUM(bp) == CHIP_NUM_5709))
1376                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1377                                MII_BNX2_BLK_ADDR_GP_STATUS);
1378 }
1379
1380 static void
1381 bnx2_disable_bmsr1(struct bnx2 *bp)
1382 {
1383         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1384             (CHIP_NUM(bp) == CHIP_NUM_5709))
1385                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1386                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1387 }
1388
1389 static int
1390 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1391 {
1392         u32 up1;
1393         int ret = 1;
1394
1395         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1396                 return 0;
1397
1398         if (bp->autoneg & AUTONEG_SPEED)
1399                 bp->advertising |= ADVERTISED_2500baseX_Full;
1400
1401         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1402                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1403
1404         bnx2_read_phy(bp, bp->mii_up1, &up1);
1405         if (!(up1 & BCM5708S_UP1_2G5)) {
1406                 up1 |= BCM5708S_UP1_2G5;
1407                 bnx2_write_phy(bp, bp->mii_up1, up1);
1408                 ret = 0;
1409         }
1410
1411         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1412                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1413                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1414
1415         return ret;
1416 }
1417
1418 static int
1419 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1420 {
1421         u32 up1;
1422         int ret = 0;
1423
1424         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1425                 return 0;
1426
1427         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1428                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1429
1430         bnx2_read_phy(bp, bp->mii_up1, &up1);
1431         if (up1 & BCM5708S_UP1_2G5) {
1432                 up1 &= ~BCM5708S_UP1_2G5;
1433                 bnx2_write_phy(bp, bp->mii_up1, up1);
1434                 ret = 1;
1435         }
1436
1437         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1438                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1439                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1440
1441         return ret;
1442 }
1443
/* Force the SerDes link to 2.5G (chip-specific register sequence). */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* 5709: force 2.5G via the SERDES_DIG MISC1 register. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		/* Restore the IEEEB0 block before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		/* 5708: a dedicated BMCR bit forces 2.5G. */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips cannot force 2.5G. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Forcing a speed means autoneg must be off. */
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1480
/* Undo the forced 2.5G setting and restart autoneg if it was enabled. */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* 5709: clear the force bit in the SERDES_DIG MISC1 reg. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		/* Restore the IEEEB0 block before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		/* 5708: clear the dedicated BMCR force bit. */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips never had 2.5G forced. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		/* Re-enable and restart autoneg at gigabit. */
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1513
1514 static void
1515 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1516 {
1517         u32 val;
1518
1519         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1520         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1521         if (start)
1522                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1523         else
1524                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1525 }
1526
/* Re-evaluate the PHY link state and reprogram the MAC to match.
 * Called with bp->phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote PHY: link state is managed by the bootcode, not us. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR is latched; read twice for the current value.  On 5709
	 * SerDes the read must go through the GP_STATUS block.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes workaround: derive link from EMAC status
		 * and the AN debug shadow rather than trusting BMSR.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: back out of forced 2.5G so autoneg can work. */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode and re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1610
1611 static int
1612 bnx2_reset_phy(struct bnx2 *bp)
1613 {
1614         int i;
1615         u32 reg;
1616
1617         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1618
1619 #define PHY_RESET_MAX_WAIT 100
1620         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1621                 udelay(10);
1622
1623                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1624                 if (!(reg & BMCR_RESET)) {
1625                         udelay(20);
1626                         break;
1627                 }
1628         }
1629         if (i == PHY_RESET_MAX_WAIT) {
1630                 return -EBUSY;
1631         }
1632         return 0;
1633 }
1634
1635 static u32
1636 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1637 {
1638         u32 adv = 0;
1639
1640         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1641                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1642
1643                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644                         adv = ADVERTISE_1000XPAUSE;
1645                 }
1646                 else {
1647                         adv = ADVERTISE_PAUSE_CAP;
1648                 }
1649         }
1650         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1651                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                         adv = ADVERTISE_1000XPSE_ASYM;
1653                 }
1654                 else {
1655                         adv = ADVERTISE_PAUSE_ASYM;
1656                 }
1657         }
1658         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1659                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1660                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1661                 }
1662                 else {
1663                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1664                 }
1665         }
1666         return adv;
1667 }
1668
1669 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1670
1671 static int
1672 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1673 __releases(&bp->phy_lock)
1674 __acquires(&bp->phy_lock)
1675 {
1676         u32 speed_arg = 0, pause_adv;
1677
1678         pause_adv = bnx2_phy_get_pause_adv(bp);
1679
1680         if (bp->autoneg & AUTONEG_SPEED) {
1681                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1682                 if (bp->advertising & ADVERTISED_10baseT_Half)
1683                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1684                 if (bp->advertising & ADVERTISED_10baseT_Full)
1685                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1686                 if (bp->advertising & ADVERTISED_100baseT_Half)
1687                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1688                 if (bp->advertising & ADVERTISED_100baseT_Full)
1689                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1690                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1691                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1692                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1693                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1694         } else {
1695                 if (bp->req_line_speed == SPEED_2500)
1696                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1697                 else if (bp->req_line_speed == SPEED_1000)
1698                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1699                 else if (bp->req_line_speed == SPEED_100) {
1700                         if (bp->req_duplex == DUPLEX_FULL)
1701                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1702                         else
1703                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1704                 } else if (bp->req_line_speed == SPEED_10) {
1705                         if (bp->req_duplex == DUPLEX_FULL)
1706                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1707                         else
1708                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1709                 }
1710         }
1711
1712         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1713                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1714         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1715                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1716
1717         if (port == PORT_TP)
1718                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1719                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1720
1721         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1722
1723         spin_unlock_bh(&bp->phy_lock);
1724         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1725         spin_lock_bh(&bp->phy_lock);
1726
1727         return 0;
1728 }
1729
/* Configure the SerDes PHY according to bp->autoneg, bp->req_line_speed
 * and bp->req_duplex.  Delegates to the firmware when a remote PHY is
 * managing the link.  Called with bp->phy_lock held; the lock is
 * dropped around the 20 ms link-down delay (see sparse annotations).
 * Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR/ADV directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on or off requires a
		 * link bounce so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* 2.5G forcing is chip-specific. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 looks like a forced-
				 * speed BMCR bit on the 5709 — confirm against
				 * the register spec.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path: build the new advertisement and restart AN only
	 * if it differs from what the PHY currently advertises.
	 */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1846
/* Ethtool "advertise everything" masks.  NOTE: ETHTOOL_ALL_FIBRE_SPEED
 * expands the identifier `bp` from the call site, so it may only be
 * used where a `struct bnx2 *bp` is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks covering all 10/100 and 1000 modes. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1861
1862 static void
1863 bnx2_set_default_remote_link(struct bnx2 *bp)
1864 {
1865         u32 link;
1866
1867         if (bp->phy_port == PORT_TP)
1868                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1869         else
1870                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1871
1872         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1873                 bp->req_line_speed = 0;
1874                 bp->autoneg |= AUTONEG_SPEED;
1875                 bp->advertising = ADVERTISED_Autoneg;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1877                         bp->advertising |= ADVERTISED_10baseT_Half;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1879                         bp->advertising |= ADVERTISED_10baseT_Full;
1880                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1881                         bp->advertising |= ADVERTISED_100baseT_Half;
1882                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1883                         bp->advertising |= ADVERTISED_100baseT_Full;
1884                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1885                         bp->advertising |= ADVERTISED_1000baseT_Full;
1886                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1887                         bp->advertising |= ADVERTISED_2500baseX_Full;
1888         } else {
1889                 bp->autoneg = 0;
1890                 bp->advertising = 0;
1891                 bp->req_duplex = DUPLEX_FULL;
1892                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1893                         bp->req_line_speed = SPEED_10;
1894                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1895                                 bp->req_duplex = DUPLEX_HALF;
1896                 }
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1898                         bp->req_line_speed = SPEED_100;
1899                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1900                                 bp->req_duplex = DUPLEX_HALF;
1901                 }
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1903                         bp->req_line_speed = SPEED_1000;
1904                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1905                         bp->req_line_speed = SPEED_2500;
1906         }
1907 }
1908
1909 static void
1910 bnx2_set_default_link(struct bnx2 *bp)
1911 {
1912         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1913                 bnx2_set_default_remote_link(bp);
1914                 return;
1915         }
1916
1917         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1918         bp->req_line_speed = 0;
1919         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1920                 u32 reg;
1921
1922                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1923
1924                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1925                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1926                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1927                         bp->autoneg = 0;
1928                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1929                         bp->req_duplex = DUPLEX_FULL;
1930                 }
1931         } else
1932                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1933 }
1934
1935 static void
1936 bnx2_send_heart_beat(struct bnx2 *bp)
1937 {
1938         u32 msg;
1939         u32 addr;
1940
1941         spin_lock(&bp->indirect_lock);
1942         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1943         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1944         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1945         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1946         spin_unlock(&bp->indirect_lock);
1947 }
1948
/* Process a link-status event reported by the remote PHY firmware:
 * decode speed/duplex/flow-control from the shared-memory link status
 * word, update bp accordingly, and reprogram the MAC.  Also answers a
 * pending heart-beat request if one is flagged in the status word.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The *HALF cases set duplex and then deliberately fall
		 * through to the matching *FULL case to pick up the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control was forced, not negotiated. */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A media change (TP <-> fibre) resets link defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2025
2026 static int
2027 bnx2_set_remote_link(struct bnx2 *bp)
2028 {
2029         u32 evt_code;
2030
2031         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2032         switch (evt_code) {
2033                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2034                         bnx2_remote_phy_event(bp);
2035                         break;
2036                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2037                 default:
2038                         bnx2_send_heart_beat(bp);
2039                         break;
2040         }
2041         return 0;
2042 }
2043
/* Configure the copper PHY from bp->autoneg / req_line_speed /
 * req_duplex.  Called with bp->phy_lock held; the lock is dropped
 * around the 50 ms forced-link-down delay (see sparse annotations).
 * Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage so the compare below
		 * is not confused by reserved/unrelated bits.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement actually
		 * changed or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2142
2143 static int
2144 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2145 __releases(&bp->phy_lock)
2146 __acquires(&bp->phy_lock)
2147 {
2148         if (bp->loopback == MAC_LOOPBACK)
2149                 return 0;
2150
2151         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2152                 return (bnx2_setup_serdes_phy(bp, port));
2153         }
2154         else {
2155                 return (bnx2_setup_copper_phy(bp));
2156         }
2157 }
2158
/* One-time init of the 5709 SerDes PHY.  The write order matters: each
 * MII_BNX2_BLK_ADDR write selects a register block for the accesses
 * that follow it.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* On the 5709 SerDes the standard IEEE registers sit at an
	 * offset of 0x10; point the generic MII accessors there.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and turn off media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable 2.5G advertisement only when the device supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address at the combo IEEE block so the
	 * remapped MII accessors above work afterwards.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2208
/* One-time init of the 5708 SerDes PHY: fiber mode, PLL detect, 2.5G
 * advertisement when capable, plus chip-rev and backplane TX tuning
 * from shared-memory hardware config.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a non-zero TXCTL3 tuning value from NVRAM config, but
	 * only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2266
/* One-time init of the 5706 SerDes PHY.  The 0x18/0x1c accesses are
 * Broadcom shadow registers; the magic values come from the vendor
 * datasheet (write selects the shadow, read-modify-write updates it).
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2304
/* One-time init of the copper PHY: workaround writes for known erratum
 * flags, extended packet length for jumbo MTU, and ethernet@wirespeed.
 * The 0x15/0x17/0x18 accesses are Broadcom shadow/expansion registers;
 * the magic values come from the vendor datasheet.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC erratum workaround write sequence. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via DSP expansion register bit 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2356
2357
2358 static int
2359 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2360 __releases(&bp->phy_lock)
2361 __acquires(&bp->phy_lock)
2362 {
2363         u32 val;
2364         int rc = 0;
2365
2366         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2367         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2368
2369         bp->mii_bmcr = MII_BMCR;
2370         bp->mii_bmsr = MII_BMSR;
2371         bp->mii_bmsr1 = MII_BMSR;
2372         bp->mii_adv = MII_ADVERTISE;
2373         bp->mii_lpa = MII_LPA;
2374
2375         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2376
2377         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2378                 goto setup_phy;
2379
2380         bnx2_read_phy(bp, MII_PHYSID1, &val);
2381         bp->phy_id = val << 16;
2382         bnx2_read_phy(bp, MII_PHYSID2, &val);
2383         bp->phy_id |= val & 0xffff;
2384
2385         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2386                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2387                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2388                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2389                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2390                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2391                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2392         }
2393         else {
2394                 rc = bnx2_init_copper_phy(bp, reset_phy);
2395         }
2396
2397 setup_phy:
2398         if (!rc)
2399                 rc = bnx2_setup_phy(bp, bp->phy_port);
2400
2401         return rc;
2402 }
2403
2404 static int
2405 bnx2_set_mac_loopback(struct bnx2 *bp)
2406 {
2407         u32 mac_mode;
2408
2409         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2410         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2411         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2412         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2413         bp->link_up = 1;
2414         return 0;
2415 }
2416
2417 static int bnx2_test_link(struct bnx2 *);
2418
2419 static int
2420 bnx2_set_phy_loopback(struct bnx2 *bp)
2421 {
2422         u32 mac_mode;
2423         int rc, i;
2424
2425         spin_lock_bh(&bp->phy_lock);
2426         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2427                             BMCR_SPEED1000);
2428         spin_unlock_bh(&bp->phy_lock);
2429         if (rc)
2430                 return rc;
2431
2432         for (i = 0; i < 10; i++) {
2433                 if (bnx2_test_link(bp) == 0)
2434                         break;
2435                 msleep(100);
2436         }
2437
2438         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2439         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2440                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2441                       BNX2_EMAC_MODE_25G_MODE);
2442
2443         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2444         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445         bp->link_up = 1;
2446         return 0;
2447 }
2448
/* Post a message to the firmware mailbox and, when @ack is set, wait
 * for the acknowledgement.  A per-driver sequence number is folded into
 * the message so stale acks can be told apart.  Returns 0 on success,
 * -EBUSY if the firmware did not ack in time, -EIO if it acked with a
 * failure status.  Sleeps, so must not be called from atomic context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages never report errors back to the caller. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2494
/* Initialize the 5709 on-chip context memory: trigger the hardware
 * MEM_INIT, then program the host page table with the DMA address of
 * each pre-allocated context block in bp->ctx_blk[].
 *
 * Returns 0 on success, -EBUSY if a hardware poll times out, or
 * -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Enable the context engine and start memory init; bits 16+ encode
         * the page size relative to 256 bytes.  (1 << 12) is an
         * undocumented flag here — NOTE(review): meaning not derivable
         * from this file.
         */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Poll for MEM_INIT self-clear (up to ~20us). */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Each host context page must exist and start zeroed. */
                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Write the 64-bit DMA address into the page table entry,
                 * low half first with the VALID bit, then the high half,
                 * then kick the WRITE_REQ for entry i.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Poll for WRITE_REQ self-clear (up to ~50us). */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2542
/* Zero out the on-chip context memory for all 96 connection IDs on
 * pre-5709 chips (the 5709 uses host memory instead, see
 * bnx2_init_5709_context()).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        /* 5706 A0 remaps certain VCIDs to different
                         * physical CIDs — presumably a chip erratum
                         * workaround; TODO confirm against errata doc.
                         */
                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* A context may span several physical pages; map and
                 * clear each one.
                 */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2585
2586 static int
2587 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2588 {
2589         u16 *good_mbuf;
2590         u32 good_mbuf_cnt;
2591         u32 val;
2592
2593         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2594         if (good_mbuf == NULL) {
2595                 printk(KERN_ERR PFX "Failed to allocate memory in "
2596                                     "bnx2_alloc_bad_rbuf\n");
2597                 return -ENOMEM;
2598         }
2599
2600         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2601                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2602
2603         good_mbuf_cnt = 0;
2604
2605         /* Allocate a bunch of mbufs and save the good ones in an array. */
2606         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2607         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2608                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2609                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2610
2611                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2612
2613                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2614
2615                 /* The addresses with Bit 9 set are bad memory blocks. */
2616                 if (!(val & (1 << 9))) {
2617                         good_mbuf[good_mbuf_cnt] = (u16) val;
2618                         good_mbuf_cnt++;
2619                 }
2620
2621                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2622         }
2623
2624         /* Free the good ones back to the mbuf pool thus discarding
2625          * all the bad ones. */
2626         while (good_mbuf_cnt) {
2627                 good_mbuf_cnt--;
2628
2629                 val = good_mbuf[good_mbuf_cnt];
2630                 val = (val << 9) | val | 1;
2631
2632                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2633         }
2634         kfree(good_mbuf);
2635         return 0;
2636 }
2637
2638 static void
2639 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2640 {
2641         u32 val;
2642
2643         val = (mac_addr[0] << 8) | mac_addr[1];
2644
2645         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2646
2647         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2648                 (mac_addr[4] << 8) | mac_addr[5];
2649
2650         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2651 }
2652
2653 static inline int
2654 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2655 {
2656         dma_addr_t mapping;
2657         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2658         struct rx_bd *rxbd =
2659                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2660         struct page *page = alloc_page(GFP_ATOMIC);
2661
2662         if (!page)
2663                 return -ENOMEM;
2664         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2665                                PCI_DMA_FROMDEVICE);
2666         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2667                 __free_page(page);
2668                 return -EIO;
2669         }
2670
2671         rx_pg->page = page;
2672         pci_unmap_addr_set(rx_pg, mapping, mapping);
2673         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2674         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2675         return 0;
2676 }
2677
2678 static void
2679 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2680 {
2681         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2682         struct page *page = rx_pg->page;
2683
2684         if (!page)
2685                 return;
2686
2687         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2688                        PCI_DMA_FROMDEVICE);
2689
2690         __free_page(page);
2691         rx_pg->page = NULL;
2692 }
2693
2694 static inline int
2695 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2696 {
2697         struct sk_buff *skb;
2698         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2699         dma_addr_t mapping;
2700         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2701         unsigned long align;
2702
2703         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2704         if (skb == NULL) {
2705                 return -ENOMEM;
2706         }
2707
2708         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2709                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2710
2711         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2712                 PCI_DMA_FROMDEVICE);
2713         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2714                 dev_kfree_skb(skb);
2715                 return -EIO;
2716         }
2717
2718         rx_buf->skb = skb;
2719         pci_unmap_addr_set(rx_buf, mapping, mapping);
2720
2721         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2722         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2723
2724         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2725
2726         return 0;
2727 }
2728
2729 static int
2730 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2731 {
2732         struct status_block *sblk = bnapi->status_blk.msi;
2733         u32 new_link_state, old_link_state;
2734         int is_set = 1;
2735
2736         new_link_state = sblk->status_attn_bits & event;
2737         old_link_state = sblk->status_attn_bits_ack & event;
2738         if (new_link_state != old_link_state) {
2739                 if (new_link_state)
2740                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2741                 else
2742                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2743         } else
2744                 is_set = 0;
2745
2746         return is_set;
2747 }
2748
/* Dispatch PHY attention events: a link state change or a
 * firmware/timer abort event.  bp->phy_lock serializes PHY access
 * with other contexts.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        spin_lock(&bp->phy_lock);

        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
                bnx2_set_link(bp);
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

        spin_unlock(&bp->phy_lock);

}
2762
/* Read the hardware TX consumer index from the status block.  An index
 * landing exactly on MAX_TX_DESC_CNT is advanced by one — presumably
 * because that ring entry is not a normal BD (end-of-page chain entry);
 * NOTE(review): confirm against the ring layout in bnx2.h.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_tx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
                cons++;
        return cons;
}
2776
/* Reap completed TX descriptors for one ring: unmap DMA buffers, free
 * the skbs, and wake the TX queue if it was stopped and enough
 * descriptors are now available.
 *
 * @bp:     device context
 * @bnapi:  per-vector NAPI context owning the TX ring
 * @budget: maximum number of packets to complete in this call
 *
 * Returns the number of packets reaped.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
        struct netdev_queue *txq;

        /* The vector index doubles as the TX queue index. */
        index = (bnapi - bp->bnx2_napi);
        txq = netdev_get_tx_queue(bp->dev, index);

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
                prefetch(&skb->end);

                /* partial BD completions possible with TSO packets */
                if (tx_buf->is_gso) {
                        u16 last_idx, last_ring_idx;

                        /* Index of the packet's last BD; mirror the extra
                         * increment bnx2_get_hw_tx_cons() applies at the
                         * end-of-ring entry.
                         */
                        last_idx = sw_cons + tx_buf->nr_frags + 1;
                        last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Only proceed once the whole packet is complete;
                         * signed distance handles 16-bit index wrap.
                         */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = tx_buf->nr_frags;

                /* Unmap every fragment BD following the head BD. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Re-read the hardware index to pick up completions that
                 * arrived while we were working.
                 */
                if (hw_cons == sw_cons)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                /* Re-check under the TX lock to avoid racing with the
                 * transmit path stopping the queue concurrently.
                 */
                __netif_tx_lock(txq, smp_processor_id());
                if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        return tx_pkt;
}
2867
/* Recycle @count page-ring entries from the consumer side back to the
 * producer side without allocating new pages — used on error/allocation
 * failure paths.  If @skb is non-NULL, its last frag page is first
 * detached and returned to the ring, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        int i;
        u16 hw_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        cons_rx_pg = &rxr->rx_pg_ring[cons];

        /* The caller was unable to allocate a new page to replace the
         * last one in the frags array, so we need to recycle that page
         * and then free the skb.
         */
        if (skb) {
                struct page *page;
                struct skb_shared_info *shinfo;

                shinfo = skb_shinfo(skb);
                shinfo->nr_frags--;
                page = shinfo->frags[shinfo->nr_frags].page;
                shinfo->frags[shinfo->nr_frags].page = NULL;

                cons_rx_pg->page = page;
                dev_kfree_skb(skb);
        }

        hw_prod = rxr->rx_pg_prod;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                /* Move the page, its DMA mapping and the BD address from
                 * the consumer slot to the producer slot (no-op when they
                 * coincide).
                 */
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2923
/* Give an skb back to the RX ring at the producer slot instead of
 * passing it up the stack — used when the packet is copied or dropped.
 * Re-syncs the buffer for device access and, if producer and consumer
 * slots differ, moves the DMA mapping and BD address across.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Only the header area was synced for the CPU in bnx2_rx_int();
         * hand that range back to the device.
         */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2953
/* Finish building a received skb: replace its ring slot with a fresh
 * buffer, unmap it, and — for jumbo/split packets (hdr_len != 0) —
 * attach the payload pages from the page ring as frags.
 *
 * @len:      packet length (without the trailing 4 bytes; the "+ 4"
 *            below presumably re-adds the FCS — TODO confirm)
 * @hdr_len:  bytes in the linear part; 0 means the whole packet is linear
 * @ring_idx: producer index in the low 16 bits, consumer in the high 16
 *
 * Returns 0 on success or a negative errno; on failure the buffer and
 * any pages are recycled back to their rings.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        /* Refill the slot first; if that fails, recycle this skb (and
         * its pages) and report the error.
         */
        err = bnx2_alloc_rx_skb(bp, rxr, prod);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Entire packet fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        dma_addr_t mapping_old;

                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        /* A final fragment of <= 4 bytes is entirely the
                         * trailing 4 bytes: trim them from the skb and
                         * recycle the remaining pages instead of
                         * attaching them.
                         */
                        if (unlikely(frag_len <= 4)) {
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
                        mapping_old = pci_unmap_addr(rx_pg, mapping);
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                /* Passing skb recycles its last frag page
                                 * and frees the skb.
                                 */
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        pci_unmap_page(bp->pdev, mapping_old,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
3052
/* Read the hardware RX consumer index from the status block.  As with
 * the TX variant, an index landing on MAX_RX_DESC_CNT is advanced by
 * one — presumably skipping a non-BD ring entry; NOTE(review): confirm
 * against the ring layout in bnx2.h.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_rx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
                cons++;
        return cons;
}
3066
/* NAPI RX handler for one ring: walk completed RX descriptors up to
 * @budget packets, validate each frame header, build or copy the skb,
 * handle VLAN tags and checksum offload status, and hand the packet to
 * the stack.  Finally publish the new producer indices to hardware.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u16 vtag = 0;
                int hw_vlan __maybe_unused = 0;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Only the frame header area is synced here; the rest is
                 * unmapped later in bnx2_rx_skb() if the packet is kept.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                /* The hardware prepends an l2_fhdr status block to the
                 * packet data.
                 */
                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;

                /* Determine the linear header length for split (paged)
                 * packets; non-zero hdr_len means the payload continues
                 * in the page ring.
                 */
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Drop errored frames and recycle their buffers/pages. */
                if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
                                       L2_FHDR_ERRORS_PHY_DECODE |
                                       L2_FHDR_ERRORS_ALIGNMENT |
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {

                        bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;

                                pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                        }
                        goto next_rx;
                }

                /* Strip the trailing 4 bytes (FCS, presumably). */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        /* Small packet: copy into a right-sized skb and
                         * recycle the original ring buffer.
                         */
                        new_skb = netdev_alloc_skb(bp->dev, len + 6);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                                         BNX2_RX_OFFSET - 6,
                                      new_skb->data, len + 6);
                        skb_reserve(new_skb, 6);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, rxr, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
                        vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
                        if (bp->vlgrp)
                                hw_vlan = 1;
                        else
#endif
                        {
                                /* No VLAN acceleration group: rebuild the
                                 * 802.1Q header in front of the payload.
                                 */
                                struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
                                        __skb_push(skb, 4);

                                memmove(ve, skb->data + 4, ETH_ALEN * 2);
                                ve->h_vlan_proto = htons(ETH_P_8021Q);
                                ve->h_vlan_TCI = htons(vtag);
                                len += 4;
                        }
                }

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop over-MTU frames unless they are VLAN-tagged
                 * (0x8100), which legitimately adds 4 bytes.
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Report hardware checksum result only when the frame was
                 * recognized as TCP/UDP and no checksum error is flagged.
                 */
                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

                skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
                if (hw_vlan)
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
                else
#endif
                        netif_receive_skb(skb);

                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        /* Tell the chip about the new producer positions. */
        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
3242
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);
        /* Mask the interrupt (MASK_INT) until NAPI has processed the
         * pending work.
         */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3265
/* One-shot MSI ISR.  Unlike bnx2_msi(), no mask/ack register write is
 * issued here — one-shot mode presumably disables the interrupt in
 * hardware until it is re-armed; NOTE(review): confirm against the
 * one-shot MSI setup elsewhere in this driver.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3282
/* INTx (shared-capable) ISR: verify the interrupt belongs to this
 * device, mask further interrupts, and hand the work to NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Record the status index only if NAPI was not already
         * scheduled, so a concurrent poll is not disturbed.
         */
        if (napi_schedule_prep(&bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __napi_schedule(&bnapi->napi);
        }

        return IRQ_HANDLED;
}
3321
3322 static inline int
3323 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3324 {
3325         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3326         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3327
3328         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3329             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3330                 return 1;
3331         return 0;
3332 }
3333
/* Attention events the driver services: link state changes and the
 * timer abort event.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Report whether any work (fast path, CNIC, or attention events) is
 * pending for this NAPI instance.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* CNIC work pending if its tag lags the current status index. */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* An attention event is pending when its bit differs between the
	 * raw and acknowledged attention words.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3356
/* Detect and recover from a missed MSI.  If work has been pending and
 * the status index has not moved since the previous idle check, toggle
 * the MSI enable bit off and on and invoke the MSI handler directly to
 * kick processing.  Called periodically — presumably from the driver
 * timer (TODO confirm caller).
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Only applies when MSI is enabled in the device. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* No progress since the last check: force a recovery. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3378
#ifdef BCM_CNIC
/* Hand the status block to the CNIC driver's handler, if one is
 * registered.  bp->cnic_ops is RCU-protected; the returned tag is
 * cached so bnx2_has_work() can tell when CNIC work is pending.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3395
3396 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3397 {
3398         struct status_block *sblk = bnapi->status_blk.msi;
3399         u32 status_attn_bits = sblk->status_attn_bits;
3400         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3401
3402         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3403             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3404
3405                 bnx2_phy_int(bp, bnapi);
3406
3407                 /* This is needed to take care of transient status
3408                  * during link changes.
3409                  */
3410                 REG_WR(bp, BNX2_HC_COMMAND,
3411                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3412                 REG_RD(bp, BNX2_HC_COMMAND);
3413         }
3414 }
3415
3416 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3417                           int work_done, int budget)
3418 {
3419         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3420         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3421
3422         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3423                 bnx2_tx_int(bp, bnapi, 0);
3424
3425         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3426                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3427
3428         return work_done;
3429 }
3430
/* NAPI poll routine for MSI-X vectors.  Loops servicing fast-path work
 * until either the budget is exhausted or no more work is pending, then
 * re-enables the vector's interrupt.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: ack with the last seen index and unmask. */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3457
/* NAPI poll routine for the INTx / MSI (non-MSI-X) case.  Services link
 * attention, fast-path work, and CNIC, then re-enables interrupts once
 * no work remains or the budget runs out.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* MSI/MSI-X: a single ack write re-enables. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with MASK_INT still set, then a
			 * second write without it to unmask.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3506
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the receive filtering (promiscuous, multicast hash,
 * unicast match entries, VLAN tag keeping) and the RPM sort-user
 * registers from the net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* With VLAN support built, only keep tags when no vlan group is
	 * registered and the chip allows it.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 256 filter bits:
			 * low CRC byte selects register (top 3 bits) and
			 * bit position (low 5 bits).
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses to match exactly: fall back to
	 * promiscuous receive.
	 */
	if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		list_for_each_entry(ha, &dev->uc.list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user register. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3602
3603 static int __devinit
3604 check_fw_section(const struct firmware *fw,
3605                  const struct bnx2_fw_file_section *section,
3606                  u32 alignment, bool non_empty)
3607 {
3608         u32 offset = be32_to_cpu(section->offset);
3609         u32 len = be32_to_cpu(section->len);
3610
3611         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3612                 return -EINVAL;
3613         if ((non_empty && len == 0) || len > fw->size - offset ||
3614             len & (alignment - 1))
3615                 return -EINVAL;
3616         return 0;
3617 }
3618
3619 static int __devinit
3620 check_mips_fw_entry(const struct firmware *fw,
3621                     const struct bnx2_mips_fw_file_entry *entry)
3622 {
3623         if (check_fw_section(fw, &entry->text, 4, true) ||
3624             check_fw_section(fw, &entry->data, 4, false) ||
3625             check_fw_section(fw, &entry->rodata, 4, false))
3626                 return -EINVAL;
3627         return 0;
3628 }
3629
/* Request and validate the MIPS and RV2P firmware images for this chip.
 * Firmware pointers are stored in bp->mips_firmware / bp->rv2p_firmware.
 *
 * NOTE(review): on the error paths below, firmware already loaded into
 * bp is not released here — presumably the probe error path releases
 * it; verify against the caller.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the firmware files for the chip; 5709 A0/A1 need a
	 * different RV2P image than later 5709 steppings.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
		       mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
		       rv2p_fw_file);
		return rc;
	}
	/* Sanity-check every section of both images before use. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
		       mips_fw_file);
		return -EINVAL;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
		       rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
3685
3686 static u32
3687 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3688 {
3689         switch (idx) {
3690         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3691                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3692                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3693                 break;
3694         }
3695         return rv2p_code;
3696 }
3697
/* Load one RV2P processor's firmware: write the instruction stream
 * (two dwords per instruction, HIGH then LOW) through the processor's
 * address/command register, apply the fixup patches, then hold the
 * processor in reset (un-stall happens later).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for the target processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each instruction is 8 bytes: high dword, low dword, then a
	 * write command at the instruction index.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Second pass: rewrite the fixup locations, patching the low
	 * dword through rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3757
3758 static int
3759 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3760             const struct bnx2_mips_fw_file_entry *fw_entry)
3761 {
3762         u32 addr, len, file_offset;
3763         __be32 *data;
3764         u32 offset;
3765         u32 val;
3766
3767         /* Halt the CPU. */
3768         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3769         val |= cpu_reg->mode_value_halt;
3770         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3771         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3772
3773         /* Load the Text area. */
3774         addr = be32_to_cpu(fw_entry->text.addr);
3775         len = be32_to_cpu(fw_entry->text.len);
3776         file_offset = be32_to_cpu(fw_entry->text.offset);
3777         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3778
3779         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3780         if (len) {
3781                 int j;
3782
3783                 for (j = 0; j < (len / 4); j++, offset += 4)
3784                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3785         }
3786
3787         /* Load the Data area. */
3788         addr = be32_to_cpu(fw_entry->data.addr);
3789         len = be32_to_cpu(fw_entry->data.len);
3790         file_offset = be32_to_cpu(fw_entry->data.offset);
3791         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3792
3793         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3794         if (len) {
3795                 int j;
3796
3797                 for (j = 0; j < (len / 4); j++, offset += 4)
3798                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3799         }
3800
3801         /* Load the Read-Only area. */
3802         addr = be32_to_cpu(fw_entry->rodata.addr);
3803         len = be32_to_cpu(fw_entry->rodata.len);
3804         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3805         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3806
3807         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3808         if (len) {
3809                 int j;
3810
3811                 for (j = 0; j < (len / 4); j++, offset += 4)
3812                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3813         }
3814
3815         /* Clear the pre-fetch instruction. */
3816         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3817
3818         val = be32_to_cpu(fw_entry->start_addr);
3819         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3820
3821         /* Start the CPU. */
3822         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3823         val &= ~cpu_reg->mode_value_halt;
3824         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3825         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3826
3827         return 0;
3828 }
3829
3830 static int
3831 bnx2_init_cpus(struct bnx2 *bp)
3832 {
3833         const struct bnx2_mips_fw_file *mips_fw =
3834                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3835         const struct bnx2_rv2p_fw_file *rv2p_fw =
3836                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3837         int rc;
3838
3839         /* Initialize the RV2P processor. */
3840         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3841         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3842
3843         /* Initialize the RX Processor. */
3844         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3845         if (rc)
3846                 goto init_cpu_err;
3847
3848         /* Initialize the TX Processor. */
3849         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3850         if (rc)
3851                 goto init_cpu_err;
3852
3853         /* Initialize the TX Patch-up Processor. */
3854         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3855         if (rc)
3856                 goto init_cpu_err;
3857
3858         /* Initialize the Completion Processor. */
3859         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3860         if (rc)
3861                 goto init_cpu_err;
3862
3863         /* Initialize the Command Processor. */
3864         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3865
3866 init_cpu_err:
3867         return rc;
3868 }
3869
/* Transition the device between PCI power states.  D0 wakes the chip
 * (with the required delay out of D3hot) and clears the wake-packet
 * machinery; D3hot optionally arms wake-on-LAN (forcing 10/100
 * autoneg on copper to reduce power), notifies the firmware, and
 * writes the PM control register.  Only D0 and D3hot are supported;
 * any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Go to D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wake packets and disable magic-packet
		 * matching now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* On copper, drop to 10/100 autoneg for the low
			 * power WOL link, restoring user settings after.
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (unless WOL is
		 * known unsupported on this board).
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* 5706 A0/A1 only enter D3hot when WOL is armed —
		 * presumably a chip errata; TODO confirm.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4007
4008 static int
4009 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4010 {
4011         u32 val;
4012         int j;
4013
4014         /* Request access to the flash interface. */
4015         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4016         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4017                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4018                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4019                         break;
4020
4021                 udelay(5);
4022         }
4023
4024         if (j >= NVRAM_TIMEOUT_COUNT)
4025                 return -EBUSY;
4026
4027         return 0;
4028 }
4029
4030 static int
4031 bnx2_release_nvram_lock(struct bnx2 *bp)
4032 {
4033         int j;
4034         u32 val;
4035
4036         /* Relinquish nvram interface. */
4037         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4038
4039         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4040                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4041                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4042                         break;
4043
4044                 udelay(5);
4045         }
4046
4047         if (j >= NVRAM_TIMEOUT_COUNT)
4048                 return -EBUSY;
4049
4050         return 0;
4051 }
4052
4053
4054 static int
4055 bnx2_enable_nvram_write(struct bnx2 *bp)
4056 {
4057         u32 val;
4058
4059         val = REG_RD(bp, BNX2_MISC_CFG);
4060         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4061
4062         if (bp->flash_info->flags & BNX2_NV_WREN) {
4063                 int j;
4064
4065                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4066                 REG_WR(bp, BNX2_NVM_COMMAND,
4067                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4068
4069                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4070                         udelay(5);
4071
4072                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4073                         if (val & BNX2_NVM_COMMAND_DONE)
4074                                 break;
4075                 }
4076
4077                 if (j >= NVRAM_TIMEOUT_COUNT)
4078                         return -EBUSY;
4079         }
4080         return 0;
4081 }
4082
4083 static void
4084 bnx2_disable_nvram_write(struct bnx2 *bp)
4085 {
4086         u32 val;
4087
4088         val = REG_RD(bp, BNX2_MISC_CFG);
4089         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4090 }
4091
4092
4093 static void
4094 bnx2_enable_nvram_access(struct bnx2 *bp)
4095 {
4096         u32 val;
4097
4098         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4099         /* Enable both bits, even on read. */
4100         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4101                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4102 }
4103
4104 static void
4105 bnx2_disable_nvram_access(struct bnx2 *bp)
4106 {
4107         u32 val;
4108
4109         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4110         /* Disable both bits, even after read. */
4111         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4112                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4113                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4114 }
4115
/* Erase the NVRAM page containing @offset.  Buffered (EEPROM-style)
 * flash parts need no explicit erase, so this is a no-op for them.
 * Caller must already hold the NVRAM lock with access enabled.
 * Returns 0 on success or -EBUSY if the DONE bit is not observed
 * within NVRAM_TIMEOUT_COUNT polls.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4155
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes,
 * stored in big-endian/flash byte order).  @cmd_flags carries
 * BNX2_NVM_COMMAND_FIRST/LAST markers for multi-word sequences.
 * Caller must hold the NVRAM lock with access enabled.  Returns 0 on
 * success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Return data in flash byte order regardless of
			 * host endianness. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4199
4200
/* Write one 32-bit word to NVRAM at @offset.  @val points to 4 bytes
 * in big-endian/flash byte order (the format bnx2_nvram_read_dword
 * produces).  @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST markers.
 * Caller must hold the NVRAM lock with access and write both enabled.
 * Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4244
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine bp->flash_size.  The 5709 uses a
 * single known flash type; older chips decode the type from the
 * NVM_CFG1 strapping and, if the interface has not yet been
 * reconfigured, program the flash config registers accordingly.
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 indicates the interface was already reconfigured
	 * (presumably by the bootcode — NOTE(review): confirm). */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a match: unknown part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size advertised in shared hw config; fall back
	 * to the flash table's total size when the field is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4327
4328 static int
4329 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4330                 int buf_size)
4331 {
4332         int rc = 0;
4333         u32 cmd_flags, offset32, len32, extra;
4334
4335         if (buf_size == 0)
4336                 return 0;
4337
4338         /* Request access to the flash interface. */
4339         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4340                 return rc;
4341
4342         /* Enable access to flash interface */
4343         bnx2_enable_nvram_access(bp);
4344
4345         len32 = buf_size;
4346         offset32 = offset;
4347         extra = 0;
4348
4349         cmd_flags = 0;
4350
4351         if (offset32 & 3) {
4352                 u8 buf[4];
4353                 u32 pre_len;
4354
4355                 offset32 &= ~3;
4356                 pre_len = 4 - (offset & 3);
4357
4358                 if (pre_len >= len32) {
4359                         pre_len = len32;
4360                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4361                                     BNX2_NVM_COMMAND_LAST;
4362                 }
4363                 else {
4364                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4365                 }
4366
4367                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4368
4369                 if (rc)
4370                         return rc;
4371
4372                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4373
4374                 offset32 += 4;
4375                 ret_buf += pre_len;
4376                 len32 -= pre_len;
4377         }
4378         if (len32 & 3) {
4379                 extra = 4 - (len32 & 3);
4380                 len32 = (len32 + 4) & ~3;
4381         }
4382
4383         if (len32 == 4) {
4384                 u8 buf[4];
4385
4386                 if (cmd_flags)
4387                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4388                 else
4389                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4390                                     BNX2_NVM_COMMAND_LAST;
4391
4392                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4393
4394                 memcpy(ret_buf, buf, 4 - extra);
4395         }
4396         else if (len32 > 0) {
4397                 u8 buf[4];
4398
4399                 /* Read the first word. */
4400                 if (cmd_flags)
4401                         cmd_flags = 0;
4402                 else
4403                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4404
4405                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4406
4407                 /* Advance to the next dword. */
4408                 offset32 += 4;
4409                 ret_buf += 4;
4410                 len32 -= 4;
4411
4412                 while (len32 > 4 && rc == 0) {
4413                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4414
4415                         /* Advance to the next dword. */
4416                         offset32 += 4;
4417                         ret_buf += 4;
4418                         len32 -= 4;
4419                 }
4420
4421                 if (rc)
4422                         return rc;
4423
4424                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4425                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4426
4427                 memcpy(ret_buf, buf, 4 - extra);
4428         }
4429
4430         /* Disable access to flash interface */
4431         bnx2_disable_nvram_access(bp);
4432
4433         bnx2_release_nvram_lock(bp);
4434
4435         return rc;
4436 }
4437
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * Unaligned head/tail bytes are preserved by reading back the bounding
 * dwords and merging them into a dword-aligned image.  For non-buffered
 * flash, each affected page is read into a scratch buffer, erased, and
 * rewritten in full (untouched page bytes restored around the new
 * data).  Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: save the dword containing the first byte. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: save the dword containing the last byte. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build a dword-aligned image: saved head bytes + caller
		 * data + saved tail bytes. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer for one flash page; 264 bytes presumably
		 * covers the largest supported page size — verify against
		 * flash_table. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4617
/* Query firmware capabilities through the shared-memory mailbox and
 * acknowledge the ones the driver will use.  Updates
 * BNX2_FLAG_CAN_KEEP_VLAN and BNX2_PHY_FLAG_REMOTE_PHY_CAP; for a
 * remote-PHY capable SerDes device, selects bp->phy_port from the
 * firmware-reported link status.  Called under bp->phy_lock (see
 * bnx2_reset_chip).
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; re-derive both flags below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware, VLAN stripping can always be
	 * kept under driver control. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Acknowledge accepted capabilities to the firmware, but only
	 * while the interface is up. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4657
/* Program the GRC window registers: select separate-window mode, then
 * point window 2 at the MSI-X table and window 3 at the MSI-X PBA.
 * Needed after every chip reset when MSI-X is in use (see
 * bnx2_reset_chip).
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4666
/* Soft-reset the chip.  Quiesces DMA, handshakes with the bootcode
 * before and after the reset, issues the chip-generation-specific
 * reset, verifies endian configuration, and re-reads firmware
 * capabilities.  @reset_code is the BNX2_DRV_MSG reason reported to
 * the firmware.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register; the reset does
		 * not disturb PCI config, so only the window/swap settings
		 * need restoring afterwards. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset via a core-reset request written to
		 * PCICFG_MISC_CONFIG, then polled until it self-clears. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the phy port selection may have
	 * changed across the reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* GRC windows are reset to defaults; re-map the MSI-X table. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4772
/* Bring the chip to an operational state after reset: configure DMA
 * byte/word swapping, context memory, on-chip CPU firmware, MAC
 * address, MTU, status/statistics block DMA addresses, and host
 * coalescing, then complete the firmware handshake and enable the
 * chip.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, disable relaxed ordering in the PCI-X command word. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox bypass window covers all contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are sized for at least a standard 1500 MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status block and reset per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the interrupt-mode value in
	 * the high 16 bits and the polling-mode value in the low 16. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer-mode workarounds. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status block configuration (vectors 1..n-1). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4997
4998 static void
4999 bnx2_clear_ring_states(struct bnx2 *bp)
5000 {
5001         struct bnx2_napi *bnapi;
5002         struct bnx2_tx_ring_info *txr;
5003         struct bnx2_rx_ring_info *rxr;
5004         int i;
5005
5006         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5007                 bnapi = &bp->bnx2_napi[i];
5008                 txr = &bnapi->tx_ring;
5009                 rxr = &bnapi->rx_ring;
5010
5011                 txr->tx_cons = 0;
5012                 txr->hw_tx_cons = 0;
5013                 rxr->rx_prod_bseq = 0;
5014                 rxr->rx_prod = 0;
5015                 rxr->rx_cons = 0;
5016                 rxr->rx_pg_prod = 0;
5017                 rxr->rx_pg_cons = 0;
5018         }
5019 }
5020
5021 static void
5022 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5023 {
5024         u32 val, offset0, offset1, offset2, offset3;
5025         u32 cid_addr = GET_CID_ADDR(cid);
5026
5027         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5028                 offset0 = BNX2_L2CTX_TYPE_XI;
5029                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5030                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5031                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5032         } else {
5033                 offset0 = BNX2_L2CTX_TYPE;
5034                 offset1 = BNX2_L2CTX_CMD_TYPE;
5035                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5036                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5037         }
5038         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5039         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5040
5041         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5042         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5043
5044         val = (u64) txr->tx_desc_mapping >> 32;
5045         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5046
5047         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5048         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5049 }
5050
5051 static void
5052 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5053 {
5054         struct tx_bd *txbd;
5055         u32 cid = TX_CID;
5056         struct bnx2_napi *bnapi;
5057         struct bnx2_tx_ring_info *txr;
5058
5059         bnapi = &bp->bnx2_napi[ring_num];
5060         txr = &bnapi->tx_ring;
5061
5062         if (ring_num == 0)
5063                 cid = TX_CID;
5064         else
5065                 cid = TX_TSS_CID + ring_num - 1;
5066
5067         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5068
5069         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5070
5071         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5072         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5073
5074         txr->tx_prod = 0;
5075         txr->tx_prod_bseq = 0;
5076
5077         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5078         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5079
5080         bnx2_init_tx_context(bp, cid, txr);
5081 }
5082
5083 static void
5084 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5085                      int num_rings)
5086 {
5087         int i;
5088         struct rx_bd *rxbd;
5089
5090         for (i = 0; i < num_rings; i++) {
5091                 int j;
5092
5093                 rxbd = &rx_ring[i][0];
5094                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5095                         rxbd->rx_bd_len = buf_size;
5096                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5097                 }
5098                 if (i == (num_rings - 1))
5099                         j = 0;
5100                 else
5101                         j = i + 1;
5102                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5103                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5104         }
5105 }
5106
/* Initialize RX ring @ring_num: program the hardware RX context with the
 * BD ring (and optional page ring) addresses, pre-fill the rings with
 * pages/skbs, and publish the initial producer indices to the chip.
 * Must be called with the chip quiesced (reset path).
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; RSS rings use consecutive CIDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* NOTE(review): arms the MQ L2 mapping on 5709 — presumably
		 * required before the context writes below take effect.
		 */
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring (non-jumbo).  Overwritten below if the
	 * paged ("jumbo") ring is in use.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		/* High half: linear buffer size; low half: page size. */
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Page-ring BD chain base address. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Normal RX BD chain base address. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is logged, not fatal. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
						"with %d/%d pages only\n",
			       bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
						"%d/%d skbs only\n",
			       bp->dev->name, ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used by the RX fast path. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5194
/* Initialize every TX and RX ring and, when multiple RX rings exist,
 * program the RSS indirection table and enable RSS hashing.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table four 1-byte entries at a
		 * time in host order, then write each packed word
		 * big-endian.  Entries are spread round-robin over
		 * num_rx_rings - 1 rings — presumably ring 0 is reserved
		 * for non-RSS/default traffic; verify against firmware
		 * docs.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Enable RSS for all IPv4 and IPv6 hash types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5239
5240 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5241 {
5242         u32 max, num_rings = 1;
5243
5244         while (ring_size > MAX_RX_DESC_CNT) {
5245                 ring_size -= MAX_RX_DESC_CNT;
5246                 num_rings++;
5247         }
5248         /* round to next power of 2 */
5249         max = max_size;
5250         while ((max & num_rings) == 0)
5251                 max >>= 1;
5252
5253         if (num_rings != max)
5254                 max <<= 1;
5255
5256         return max;
5257 }
5258
/* Compute all RX buffer and ring sizing parameters for the current MTU
 * and the requested ring size @size.  Decides whether the paged
 * ("jumbo") RX ring is needed and sets copy thresholds accordingly.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint: aligned data area plus skb overhead. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	/* Use the paged ring only when a single buffer would exceed one
	 * page and the chip's jumbo support is not broken.
	 */
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* NOTE(review): the 40 presumably accounts for IP + TCP
		 * header bytes kept in the linear part — confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* With paging, the linear buffer only needs to hold the
		 * copy-threshold bytes; disable copybreak.
		 */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5297
/* Unmap and free every skb still held in the TX rings.  Walks each ring
 * entry-by-entry: the head BD is unmapped with pci_unmap_single for the
 * linear part, then each fragment BD with pci_unmap_page, advancing j
 * past the whole packet.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring never allocated (or already freed) — skip it. */
		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Head BD maps the linear part of the skb. */
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Fragment BDs follow the head BD; j may wrap,
			 * hence TX_RING_IDX on the fragment index.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					pci_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5341
/* Unmap and free every RX skb and RX page still held in the RX rings. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): unlike bnx2_free_tx_skbs() this returns
		 * rather than continues; presumably rings are allocated
		 * in order so a NULL here implies all later rings are
		 * NULL too — confirm against the alloc path.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release any pages posted to the jumbo page ring. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5375
/* Release every TX and RX buffer owned by the driver; used on the
 * reset/teardown paths before the rings are reinitialized.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5382
5383 static int
5384 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5385 {
5386         int rc;
5387
5388         rc = bnx2_reset_chip(bp, reset_code);
5389         bnx2_free_skbs(bp);
5390         if (rc)
5391                 return rc;
5392
5393         if ((rc = bnx2_init_chip(bp)) != 0)
5394                 return rc;
5395
5396         bnx2_init_all_rings(bp);
5397         return 0;
5398 }
5399
/* Full NIC (re)initialization: reset chip and rings, then bring up the
 * PHY and link state.  @reset_phy selects whether the PHY itself is
 * reset.  PHY accesses are serialized under phy_lock (BH-disabling, as
 * the link path also runs from softirq context).
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* With a firmware-managed (remote) PHY, pick up any link event
	 * that arrived during the reset.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5416
5417 static int
5418 bnx2_shutdown_chip(struct bnx2 *bp)
5419 {
5420         u32 reset_code;
5421
5422         if (bp->flags & BNX2_FLAG_NO_WOL)
5423                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5424         else if (bp->wol)
5425                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5426         else
5427                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5428
5429         return bnx2_reset_chip(bp, reset_code);
5430 }
5431
/* Ethtool register self-test.  For each table entry, verifies that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Returns 0 on success,
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel: offset 0xffff terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: rw bits must read back 0, ro bits
		 * must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back 1, ro bits
		 * must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5602
5603 static int
5604 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5605 {
5606         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5607                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5608         int i;
5609
5610         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5611                 u32 offset;
5612
5613                 for (offset = 0; offset < size; offset += 4) {
5614
5615                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5616
5617                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5618                                 test_pattern[i]) {
5619                                 return -ENODEV;
5620                         }
5621                 }
5622         }
5623         return 0;
5624 }
5625
5626 static int
5627 bnx2_test_memory(struct bnx2 *bp)
5628 {
5629         int ret = 0;
5630         int i;
5631         static struct mem_entry {
5632                 u32   offset;
5633                 u32   len;
5634         } mem_tbl_5706[] = {
5635                 { 0x60000,  0x4000 },
5636                 { 0xa0000,  0x3000 },
5637                 { 0xe0000,  0x4000 },
5638                 { 0x120000, 0x4000 },
5639                 { 0x1a0000, 0x4000 },
5640                 { 0x160000, 0x4000 },
5641                 { 0xffffffff, 0    },
5642         },
5643         mem_tbl_5709[] = {
5644                 { 0x60000,  0x4000 },
5645                 { 0xa0000,  0x3000 },
5646                 { 0xe0000,  0x4000 },
5647                 { 0x120000, 0x4000 },
5648                 { 0x1a0000, 0x4000 },
5649                 { 0xffffffff, 0    },
5650         };
5651         struct mem_entry *mem_tbl;
5652
5653         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5654                 mem_tbl = mem_tbl_5709;
5655         else
5656                 mem_tbl = mem_tbl_5706;
5657
5658         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5659                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5660                         mem_tbl[i].len)) != 0) {
5661                         return ret;
5662                 }
5663         }
5664
5665         return ret;
5666 }
5667
5668 #define BNX2_MAC_LOOPBACK       0
5669 #define BNX2_PHY_LOOPBACK       1
5670
/* Ethtool loopback self-test.  Puts the MAC or PHY in loopback, sends a
 * single self-addressed packet through TX ring 0, forces the host
 * coalescing block to update the status block, and verifies the packet
 * arrives intact on RX ring 0.  Returns 0 on success, -ENODEV if the
 * packet is lost or corrupted, -EINVAL for a bad mode, -ENOMEM/-EIO on
 * allocation or DMA-mapping failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	/* NOTE(review): txr/rxr are reassigned here to the same values
	 * as their initializers above — redundant but harmless.
	 */
	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* A firmware-managed PHY cannot be looped back locally;
		 * report success so the test is skipped, not failed.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the packet within the linear RX buffer (minus 4 CRC). */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = our own address; payload is a byte ramp
	 * verified on receive.
	 */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status block update to snapshot the RX consumer. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single-BD TX descriptor for the whole packet. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status block update so the TX/RX indices
	 * reflect the looped-back packet.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have been consumed completely. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly num_pkts packets must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged by the chip fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the byte-ramp payload survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5802
5803 #define BNX2_MAC_LOOPBACK_FAILED        1
5804 #define BNX2_PHY_LOOPBACK_FAILED        2
5805 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5806                                          BNX2_PHY_LOOPBACK_FAILED)
5807
5808 static int
5809 bnx2_test_loopback(struct bnx2 *bp)
5810 {
5811         int rc = 0;
5812
5813         if (!netif_running(bp->dev))
5814                 return BNX2_LOOPBACK_FAILED;
5815
5816         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5817         spin_lock_bh(&bp->phy_lock);
5818         bnx2_init_phy(bp, 1);
5819         spin_unlock_bh(&bp->phy_lock);
5820         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5821                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5822         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5823                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5824         return rc;
5825 }
5826
5827 #define NVRAM_SIZE 0x200
5828 #define CRC32_RESIDUAL 0xdebb20e3
5829
5830 static int
5831 bnx2_test_nvram(struct bnx2 *bp)
5832 {
5833         __be32 buf[NVRAM_SIZE / 4];
5834         u8 *data = (u8 *) buf;
5835         int rc = 0;
5836         u32 magic, csum;
5837
5838         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5839                 goto test_nvram_done;
5840
5841         magic = be32_to_cpu(buf[0]);
5842         if (magic != 0x669955aa) {
5843                 rc = -ENODEV;
5844                 goto test_nvram_done;
5845         }
5846
5847         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5848                 goto test_nvram_done;
5849
5850         csum = ether_crc_le(0x100, data);
5851         if (csum != CRC32_RESIDUAL) {
5852                 rc = -ENODEV;
5853                 goto test_nvram_done;
5854         }
5855
5856         csum = ether_crc_le(0x100, data + 0x100);
5857         if (csum != CRC32_RESIDUAL) {
5858                 rc = -ENODEV;
5859         }
5860
5861 test_nvram_done:
5862         return rc;
5863 }
5864
5865 static int
5866 bnx2_test_link(struct bnx2 *bp)
5867 {
5868         u32 bmsr;
5869
5870         if (!netif_running(bp->dev))
5871                 return -ENODEV;
5872
5873         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5874                 if (bp->link_up)
5875                         return 0;
5876                 return -ENODEV;
5877         }
5878         spin_lock_bh(&bp->phy_lock);
5879         bnx2_enable_bmsr1(bp);
5880         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5881         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5882         bnx2_disable_bmsr1(bp);
5883         spin_unlock_bh(&bp->phy_lock);
5884
5885         if (bmsr & BMSR_LSTATUS) {
5886                 return 0;
5887         }
5888         return -ENODEV;
5889 }
5890
5891 static int
5892 bnx2_test_intr(struct bnx2 *bp)
5893 {
5894         int i;
5895         u16 status_idx;
5896
5897         if (!netif_running(bp->dev))
5898                 return -ENODEV;
5899
5900         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5901
5902         /* This register is not touched during run-time. */
5903         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5904         REG_RD(bp, BNX2_HC_COMMAND);
5905
5906         for (i = 0; i < 10; i++) {
5907                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5908                         status_idx) {
5909
5910                         break;
5911                 }
5912
5913                 msleep_interruptible(10);
5914         }
5915         if (i < 10)
5916                 return 0;
5917
5918         return -ENODEV;
5919 }
5920
5921 /* Determining link for parallel detection. */
5922 static int
5923 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5924 {
5925         u32 mode_ctl, an_dbg, exp;
5926
5927         if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5928                 return 0;
5929
5930         bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5931         bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5932
5933         if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5934                 return 0;
5935
5936         bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5937         bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5938         bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5939
5940         if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5941                 return 0;
5942
5943         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5944         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5945         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5946
5947         if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
5948                 return 0;
5949
5950         return 1;
5951 }
5952
/* SerDes link maintenance for the 5706, run under phy_lock.
 * Implements parallel detection: when autoneg gets no response but a
 * partner signal is present, the link is forced to 1G full duplex;
 * when the partner later shows autoneg activity, autoneg is restored.
 * Also detects loss of sync while the link is up and forces the link
 * down once so bnx2_set_link() can re-evaluate.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was recently (re)started; count down before
		 * probing link state again.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg enabled but no link: if the partner
			 * is seen via parallel detection, force
			 * 1000 Mb/s full duplex instead.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): registers 0x17/0x15 appear to probe
		 * partner autoneg activity (bit 0x20 set -> re-enable
		 * autoneg) - verify against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* AN debug shadow is read twice; only the second value
		 * is used.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link up but sync lost: force link down once,
			 * then fall through to bnx2_set_link() on
			 * subsequent passes.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6014
6015 static void
6016 bnx2_5708_serdes_timer(struct bnx2 *bp)
6017 {
6018         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6019                 return;
6020
6021         if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6022                 bp->serdes_an_pending = 0;
6023                 return;
6024         }
6025
6026         spin_lock(&bp->phy_lock);
6027         if (bp->serdes_an_pending)
6028                 bp->serdes_an_pending--;
6029         else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6030                 u32 bmcr;
6031
6032                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6033                 if (bmcr & BMCR_ANENABLE) {
6034                         bnx2_enable_forced_2g5(bp);
6035                         bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6036                 } else {
6037                         bnx2_disable_forced_2g5(bp);
6038                         bp->serdes_an_pending = 2;
6039                         bp->current_interval = BNX2_TIMER_INTERVAL;
6040                 }
6041
6042         } else