/*
 * Extraction residue from the source of this paste (kept for provenance):
 *   net: wireless: bcmdhd: Enable wlan access on resume for all sdio functions
 *   [linux-2.6.git] / drivers / net / bnx2.c
 * The first line is an unrelated commit title; the second identifies this
 * file as drivers/net/bnx2.c from the linux-2.6 tree.
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/checksum.h>
43 #include <linux/workqueue.h>
44 #include <linux/crc32.h>
45 #include <linux/prefetch.h>
46 #include <linux/cache.h>
47 #include <linux/firmware.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50
51 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
52 #define BCM_CNIC 1
53 #include "cnic_if.h"
54 #endif
55 #include "bnx2.h"
56 #include "bnx2_fw.h"
57
/* Driver identity and firmware image names (fetched via request_firmware). */
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.1.11"
#define DRV_MODULE_RELDATE      "July 20, 2011"
/* "06" vs "09" images presumably map to the 5706/5708 vs 5709/5716 chip
 * families -- confirm against the firmware-selection code. */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.1.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1a.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
71
/* Probe-time banner; __devinitdata lets the kernel discard it after init. */
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
74
75 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
76 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(DRV_MODULE_VERSION);
79 MODULE_FIRMWARE(FW_MIPS_FILE_06);
80 MODULE_FIRMWARE(FW_RV2P_FILE_06);
81 MODULE_FIRMWARE(FW_MIPS_FILE_09);
82 MODULE_FIRMWARE(FW_RV2P_FILE_09);
83 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
84
85 static int disable_msi = 0;
86
87 module_param(disable_msi, int, 0);
88 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
89
/* Board index values: used as the driver_data of bnx2_pci_tbl entries and
 * as the index into board_info[] below, so the two must stay in sync.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
103
/* Human-readable board names, indexed by board_t above; the order of
 * these entries must match the enum exactly.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
120
/* PCI ID match table.  HP OEM boards are matched first by subsystem
 * vendor/device ID; the PCI_ANY_ID entries for the same chip follow as
 * the generic fallback.  The final field is the board_t index.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: 5716 / 5716S device IDs (no PCI_DEVICE_ID_NX2_
	 * macro -- presumably not defined in this tree's pci_ids.h). */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
146
/*
 * Known NVRAM/flash parts.  The five leading hex words of each entry are
 * opaque controller configuration values (presumably strapping/cfg/write
 * register settings -- see struct flash_spec in bnx2.h to confirm the
 * field meanings); they are followed by flags, page geometry, the byte
 * address mask, total size, and a descriptive name.  "Expansion" entries
 * are placeholders for strapping codes with no identified part.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
235
/* 5709-family parts use a single fixed buffered-flash geometry instead of
 * the strapping-based flash_table lookup above. */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
244
245 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
246
247 static void bnx2_init_napi(struct bnx2 *bp);
248 static void bnx2_del_napi(struct bnx2 *bp);
249
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
255         barrier();
256
257         /* The ring uses 256 indices for 255 entries, one of them
258          * needs to be skipped.
259          */
260         diff = txr->tx_prod - txr->tx_cons;
261         if (unlikely(diff >= TX_DESC_CNT)) {
262                 diff &= 0xffff;
263                 if (diff == TX_DESC_CNT)
264                         diff = MAX_TX_DESC_CNT;
265         }
266         return bp->tx_ring_size - diff;
267 }
268
/*
 * Read a chip register indirectly through the PCI config window.
 * indirect_lock serializes the window-address write against the data
 * read so concurrent indirect accesses cannot interleave.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
280
/*
 * Write a chip register indirectly through the PCI config window;
 * the address/data pair is kept atomic by indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
289
290 static void
291 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
292 {
293         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
294 }
295
296 static u32
297 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298 {
299         return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
300 }
301
/*
 * Write one word into the chip's context memory for connection @cid_addr.
 * On 5709 the write goes through the CTX_CTRL mechanism and we poll (up
 * to 5 x 5us) for the WRITE_REQ bit to clear; older chips take a direct
 * address/data pair.  indirect_lock keeps the sequence atomic.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to consume the write request. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
325
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329 {
330         struct bnx2 *bp = netdev_priv(dev);
331         struct drv_ctl_io *io = &info->data.io;
332
333         switch (info->cmd) {
334         case DRV_CTL_IO_WR_CMD:
335                 bnx2_reg_wr_ind(bp, io->offset, io->data);
336                 break;
337         case DRV_CTL_IO_RD_CMD:
338                 io->data = bnx2_reg_rd_ind(bp, io->offset);
339                 break;
340         case DRV_CTL_CTX_WR_CMD:
341                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342                 break;
343         default:
344                 return -EINVAL;
345         }
346         return 0;
347 }
348
/*
 * Describe our interrupt setup to the CNIC driver.  With MSI-X the CNIC
 * gets its own vector (the one after our bp->irq_nvecs vectors) and its
 * own MSI-X status block; otherwise it shares vector 0 and polls via
 * cnic_tag/cnic_present on the first NAPI instance.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Point the CNIC at the sb_id'th status block within the
	 * MSI-X-aligned status block array. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
375
/*
 * Register the CNIC driver's ops with this device.
 * Returns -EINVAL for NULL ops, -EBUSY if already registered, -ENODEV
 * when firmware reports no iSCSI connections are supported.
 * cnic_ops is published with rcu_assign_pointer; readers dereference it
 * under RCU (see bnx2_cnic_stop/start).
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
401
/*
 * Unregister the CNIC driver: clear its state under cnic_lock, NULL the
 * RCU-protected ops pointer, then synchronize_rcu() so no reader is
 * still using the old ops when we return.  Always succeeds.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
416
/*
 * Entry point for the CNIC module to discover this device.  Fills in the
 * cnic_eth_dev callbacks and identity and returns it, or NULL when the
 * firmware supports no iSCSI connections (max_iscsi_conn == 0).
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
436
/*
 * Notify a registered CNIC driver (if any) that we are stopping.
 * cnic_ops is dereferenced under cnic_lock, which is also what
 * register/unregister hold when changing it (lockdep-annotated).
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
452
/*
 * Notify a registered CNIC driver (if any) that we are (re)starting.
 * In non-MSI-X mode the CNIC shares status block 0, so resync its
 * cnic_tag with the current status index before signalling START.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
473
474 #else
475
/* CNIC support not configured: stop/start notifications are no-ops. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
485
486 #endif
487
/*
 * Read PHY register @reg over MDIO into *@val.
 * If the EMAC is auto-polling the PHY, auto-poll is disabled around the
 * access (with a 40us settle) and re-enabled afterwards.  The command is
 * polled up to 50 x 10us for START_BUSY to clear.
 * Returns 0 on success; -EBUSY (and *val = 0) if the MDIO transaction
 * never completed.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to post the write before the settle delay. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read for the data; mask to the 16 data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
544
/*
 * Write @val to PHY register @reg over MDIO.
 * Mirrors bnx2_read_phy: auto-poll is suspended around the access when
 * enabled, and completion is polled up to 50 x 10us.
 * Returns 0 on success, -EBUSY if the transaction never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
593
/*
 * Mask interrupts on every vector.  The final read flushes the posted
 * writes so the mask is in effect when we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
607
/*
 * Unmask interrupts on every vector.  Each vector is acked twice: first
 * with MASK_INT still set (acknowledging the last status index), then
 * without it to actually enable; finally COAL_NOW forces the host
 * coalescing block to generate an interrupt if events are pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
628
/*
 * Disable interrupts and wait for in-flight handlers to finish.
 * intr_sem is raised first so the ISR fast path bails out; if the
 * device is not running there is nothing further to quiesce.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
642
643 static void
644 bnx2_napi_disable(struct bnx2 *bp)
645 {
646         int i;
647
648         for (i = 0; i < bp->irq_nvecs; i++)
649                 napi_disable(&bp->bnx2_napi[i].napi);
650 }
651
652 static void
653 bnx2_napi_enable(struct bnx2 *bp)
654 {
655         int i;
656
657         for (i = 0; i < bp->irq_nvecs; i++)
658                 napi_enable(&bp->bnx2_napi[i].napi);
659 }
660
/*
 * Quiesce the interface: optionally stop the CNIC first, then disable
 * NAPI and TX queues, synchronously mask interrupts, and drop carrier.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
673
/*
 * Reverse of bnx2_netif_stop.  intr_sem balances the increment done in
 * bnx2_disable_int_sync(); only the call that brings it back to zero
 * actually restarts the interface.  Carrier is restored only if link is
 * up (checked under phy_lock), and the CNIC is restarted last.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
691
692 static void
693 bnx2_free_tx_mem(struct bnx2 *bp)
694 {
695         int i;
696
697         for (i = 0; i < bp->num_tx_rings; i++) {
698                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
699                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
700
701                 if (txr->tx_desc_ring) {
702                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
703                                           txr->tx_desc_ring,
704                                           txr->tx_desc_mapping);
705                         txr->tx_desc_ring = NULL;
706                 }
707                 kfree(txr->tx_buf_ring);
708                 txr->tx_buf_ring = NULL;
709         }
710 }
711
/*
 * Free, for every RX ring, the DMA-coherent descriptor pages (both the
 * normal and the page-buffer rings) and the vmalloc'd software shadow
 * arrays.  Pointers are NULLed so a repeated call is harmless.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
743
744 static int
745 bnx2_alloc_tx_mem(struct bnx2 *bp)
746 {
747         int i;
748
749         for (i = 0; i < bp->num_tx_rings; i++) {
750                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
751                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
752
753                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
754                 if (txr->tx_buf_ring == NULL)
755                         return -ENOMEM;
756
757                 txr->tx_desc_ring =
758                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
759                                            &txr->tx_desc_mapping, GFP_KERNEL);
760                 if (txr->tx_desc_ring == NULL)
761                         return -ENOMEM;
762         }
763         return 0;
764 }
765
/*
 * Allocate, for every RX ring, the vmalloc'd software shadow arrays and
 * the DMA-coherent descriptor pages; the page-buffer ring is only
 * allocated when rx_pg_ring_size is nonzero.  Returns 0 or -ENOMEM;
 * partial allocations are left for the caller to free via
 * bnx2_free_rx_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		/* rx_max_pg_ring is presumably 0 when rx_pg_ring_size is 0,
		 * making this loop a no-op in that case -- confirm in the
		 * ring-size setup code. */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
813
/*
 * Free all device memory: TX and RX rings, the 5709 context-block
 * pages, and the combined status/statistics block (allocated as one
 * chunk in bnx2_alloc_mem, hence stats_blk is cleared alongside it).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
839
/* Allocate all DMA memory needed by the device: the combined status +
 * statistics block, the 5709 context pages, and the RX/TX rings.
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status block per possible MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block; hook up the quick
	 * consumer index pointers the fast path polls.
	 */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get per-vector MSI-X status blocks
		 * carved out of the same allocation at fixed offsets.
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows all status blocks in the same
	 * coherent allocation.
	 */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 keeps its context memory on the host: 8KB total,
		 * split into page-sized coherent chunks.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
917
/* Report the current link state (speed/duplex/autoneg result) to the
 * bootcode through the shared-memory LINK_STATUS word.  Skipped when
 * the PHY is owned by remote (management) firmware.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode the resolved speed/duplex combination. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches link-down events; read twice so
			 * the second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
976
977 static char *
978 bnx2_xceiver_str(struct bnx2 *bp)
979 {
980         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
981                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
982                  "Copper");
983 }
984
/* Log the link state, update the carrier flag, and propagate the
 * state to the bootcode.  The netdev_info() format string deliberately
 * has no trailing newline: the pr_cont() calls below append the
 * flow-control details to the same log line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1015
1016 static void
1017 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1018 {
1019         u32 local_adv, remote_adv;
1020
1021         bp->flow_ctrl = 0;
1022         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1023                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1024
1025                 if (bp->duplex == DUPLEX_FULL) {
1026                         bp->flow_ctrl = bp->req_flow_ctrl;
1027                 }
1028                 return;
1029         }
1030
1031         if (bp->duplex != DUPLEX_FULL) {
1032                 return;
1033         }
1034
1035         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1036             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1037                 u32 val;
1038
1039                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1040                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1041                         bp->flow_ctrl |= FLOW_CTRL_TX;
1042                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1043                         bp->flow_ctrl |= FLOW_CTRL_RX;
1044                 return;
1045         }
1046
1047         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1048         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1049
1050         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1051                 u32 new_local_adv = 0;
1052                 u32 new_remote_adv = 0;
1053
1054                 if (local_adv & ADVERTISE_1000XPAUSE)
1055                         new_local_adv |= ADVERTISE_PAUSE_CAP;
1056                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1057                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
1058                 if (remote_adv & ADVERTISE_1000XPAUSE)
1059                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
1060                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1061                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1062
1063                 local_adv = new_local_adv;
1064                 remote_adv = new_remote_adv;
1065         }
1066
1067         /* See Table 28B-3 of 802.3ab-1999 spec. */
1068         if (local_adv & ADVERTISE_PAUSE_CAP) {
1069                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1070                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1071                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1072                         }
1073                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1074                                 bp->flow_ctrl = FLOW_CTRL_RX;
1075                         }
1076                 }
1077                 else {
1078                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1079                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1080                         }
1081                 }
1082         }
1083         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1084                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1085                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1086
1087                         bp->flow_ctrl = FLOW_CTRL_TX;
1088                 }
1089         }
1090 }
1091
/* Resolve link speed/duplex on a 5709 SerDes PHY.  The negotiated
 * result is read from the GP_STATUS shadow block, after which the
 * block address is restored to COMBO_IEEEB0 for normal MII access.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested values, not the register. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1130
1131 static int
1132 bnx2_5708s_linkup(struct bnx2 *bp)
1133 {
1134         u32 val;
1135
1136         bp->link_up = 1;
1137         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1138         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1139                 case BCM5708S_1000X_STAT1_SPEED_10:
1140                         bp->line_speed = SPEED_10;
1141                         break;
1142                 case BCM5708S_1000X_STAT1_SPEED_100:
1143                         bp->line_speed = SPEED_100;
1144                         break;
1145                 case BCM5708S_1000X_STAT1_SPEED_1G:
1146                         bp->line_speed = SPEED_1000;
1147                         break;
1148                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1149                         bp->line_speed = SPEED_2500;
1150                         break;
1151         }
1152         if (val & BCM5708S_1000X_STAT1_FD)
1153                 bp->duplex = DUPLEX_FULL;
1154         else
1155                 bp->duplex = DUPLEX_HALF;
1156
1157         return 0;
1158 }
1159
1160 static int
1161 bnx2_5706s_linkup(struct bnx2 *bp)
1162 {
1163         u32 bmcr, local_adv, remote_adv, common;
1164
1165         bp->link_up = 1;
1166         bp->line_speed = SPEED_1000;
1167
1168         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1169         if (bmcr & BMCR_FULLDPLX) {
1170                 bp->duplex = DUPLEX_FULL;
1171         }
1172         else {
1173                 bp->duplex = DUPLEX_HALF;
1174         }
1175
1176         if (!(bmcr & BMCR_ANENABLE)) {
1177                 return 0;
1178         }
1179
1180         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1181         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1182
1183         common = local_adv & remote_adv;
1184         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1185
1186                 if (common & ADVERTISE_1000XFULL) {
1187                         bp->duplex = DUPLEX_FULL;
1188                 }
1189                 else {
1190                         bp->duplex = DUPLEX_HALF;
1191                 }
1192         }
1193
1194         return 0;
1195 }
1196
1197 static int
1198 bnx2_copper_linkup(struct bnx2 *bp)
1199 {
1200         u32 bmcr;
1201
1202         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1203         if (bmcr & BMCR_ANENABLE) {
1204                 u32 local_adv, remote_adv, common;
1205
1206                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1207                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1208
1209                 common = local_adv & (remote_adv >> 2);
1210                 if (common & ADVERTISE_1000FULL) {
1211                         bp->line_speed = SPEED_1000;
1212                         bp->duplex = DUPLEX_FULL;
1213                 }
1214                 else if (common & ADVERTISE_1000HALF) {
1215                         bp->line_speed = SPEED_1000;
1216                         bp->duplex = DUPLEX_HALF;
1217                 }
1218                 else {
1219                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1220                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1221
1222                         common = local_adv & remote_adv;
1223                         if (common & ADVERTISE_100FULL) {
1224                                 bp->line_speed = SPEED_100;
1225                                 bp->duplex = DUPLEX_FULL;
1226                         }
1227                         else if (common & ADVERTISE_100HALF) {
1228                                 bp->line_speed = SPEED_100;
1229                                 bp->duplex = DUPLEX_HALF;
1230                         }
1231                         else if (common & ADVERTISE_10FULL) {
1232                                 bp->line_speed = SPEED_10;
1233                                 bp->duplex = DUPLEX_FULL;
1234                         }
1235                         else if (common & ADVERTISE_10HALF) {
1236                                 bp->line_speed = SPEED_10;
1237                                 bp->duplex = DUPLEX_HALF;
1238                         }
1239                         else {
1240                                 bp->line_speed = 0;
1241                                 bp->link_up = 0;
1242                         }
1243                 }
1244         }
1245         else {
1246                 if (bmcr & BMCR_SPEED100) {
1247                         bp->line_speed = SPEED_100;
1248                 }
1249                 else {
1250                         bp->line_speed = SPEED_10;
1251                 }
1252                 if (bmcr & BMCR_FULLDPLX) {
1253                         bp->duplex = DUPLEX_FULL;
1254                 }
1255                 else {
1256                         bp->duplex = DUPLEX_HALF;
1257                 }
1258         }
1259
1260         return 0;
1261 }
1262
1263 static void
1264 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1265 {
1266         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1267
1268         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1269         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1270         val |= 0x02 << 8;
1271
1272         if (bp->flow_ctrl & FLOW_CTRL_TX)
1273                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1274
1275         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1276 }
1277
1278 static void
1279 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1280 {
1281         int i;
1282         u32 cid;
1283
1284         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1285                 if (i == 1)
1286                         cid = RX_RSS_CID;
1287                 bnx2_init_rx_context(bp, cid);
1288         }
1289 }
1290
/* Program the MAC (EMAC) to match the resolved PHY link parameters:
 * TX inter-frame timing, port mode, duplex and RX/TX pause, then ack
 * the link-change interrupt and refresh the RX contexts.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; 1000 Mbps half duplex needs the larger
	 * value (0x26ff) -- NOTE(review): per Broadcom programming
	 * values, exact field meanings not documented here.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* Flow-control changes affect the RX context settings too. */
	bnx2_init_all_rx_contexts(bp);
}
1357
1358 static void
1359 bnx2_enable_bmsr1(struct bnx2 *bp)
1360 {
1361         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1362             (CHIP_NUM(bp) == CHIP_NUM_5709))
1363                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1364                                MII_BNX2_BLK_ADDR_GP_STATUS);
1365 }
1366
1367 static void
1368 bnx2_disable_bmsr1(struct bnx2 *bp)
1369 {
1370         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1371             (CHIP_NUM(bp) == CHIP_NUM_5709))
1372                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1373                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1374 }
1375
1376 static int
1377 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1378 {
1379         u32 up1;
1380         int ret = 1;
1381
1382         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1383                 return 0;
1384
1385         if (bp->autoneg & AUTONEG_SPEED)
1386                 bp->advertising |= ADVERTISED_2500baseX_Full;
1387
1388         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1389                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1390
1391         bnx2_read_phy(bp, bp->mii_up1, &up1);
1392         if (!(up1 & BCM5708S_UP1_2G5)) {
1393                 up1 |= BCM5708S_UP1_2G5;
1394                 bnx2_write_phy(bp, bp->mii_up1, up1);
1395                 ret = 0;
1396         }
1397
1398         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1399                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1400                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1401
1402         return ret;
1403 }
1404
1405 static int
1406 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1407 {
1408         u32 up1;
1409         int ret = 0;
1410
1411         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1412                 return 0;
1413
1414         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1415                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1416
1417         bnx2_read_phy(bp, bp->mii_up1, &up1);
1418         if (up1 & BCM5708S_UP1_2G5) {
1419                 up1 &= ~BCM5708S_UP1_2G5;
1420                 bnx2_write_phy(bp, bp->mii_up1, up1);
1421                 ret = 1;
1422         }
1423
1424         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1425                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1426                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1427
1428         return ret;
1429 }
1430
/* Force the PHY into 2.5G operation.  On the 5709 this is programmed
 * through the SERDES_DIG MISC1 register; on the 5708 through a vendor
 * bit in BMCR.  Other chips have no forced-2.5G path and return early.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is only consumed on paths where err == 0, i.e. after a
	 * successful read; uninitialized_var() silences the compiler.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			/* Replace any forced speed with forced 2.5G. */
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore normal register access before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* A forced speed is incompatible with autoneg: disable it and
	 * apply the requested duplex explicitly.
	 */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1474
/* Undo bnx2_enable_forced_2g5(): clear the forced-speed bits and, if
 * speed autoneg is configured, re-enable and restart autonegotiation.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is only consumed on paths where err == 0, i.e. after a
	 * successful read; uninitialized_var() silences the compiler.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore normal register access before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1513
/* Toggle the forced-link-down state of the 5706 SerDes through the
 * expand SERDES control register (accessed via the DSP address /
 * read-write port pair).  NOTE(review): the 0xff0f clear mask
 * (start != 0) and 0xc0 set bits (start == 0) are Broadcom-specified
 * values; exact bit semantics are not documented here -- confirm
 * against the 5706 data sheet before changing.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1526
/* Re-evaluate the physical link: read the (possibly latched) link
 * status, resolve speed/duplex/flow-control via the chip-specific
 * helper, report any state change, and reprogram the MAC.  Called
 * with bp->phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY: link is managed by the management firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down; read twice (inside the bmsr1 shadow
	 * window on 5709 SerDes) to get the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive the real link state from the
	 * EMAC status and the AN debug shadow (sync detection), since
	 * BMSR alone is unreliable on this part.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* an_dbg is latched as well; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link went down: drop any forced 2.5G mode and leave
		 * parallel-detect state by re-enabling autoneg.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log and notify firmware on an actual state change. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1610
1611 static int
1612 bnx2_reset_phy(struct bnx2 *bp)
1613 {
1614         int i;
1615         u32 reg;
1616
1617         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1618
1619 #define PHY_RESET_MAX_WAIT 100
1620         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1621                 udelay(10);
1622
1623                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1624                 if (!(reg & BMCR_RESET)) {
1625                         udelay(20);
1626                         break;
1627                 }
1628         }
1629         if (i == PHY_RESET_MAX_WAIT) {
1630                 return -EBUSY;
1631         }
1632         return 0;
1633 }
1634
1635 static u32
1636 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1637 {
1638         u32 adv = 0;
1639
1640         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1641                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1642
1643                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644                         adv = ADVERTISE_1000XPAUSE;
1645                 }
1646                 else {
1647                         adv = ADVERTISE_PAUSE_CAP;
1648                 }
1649         }
1650         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1651                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                         adv = ADVERTISE_1000XPSE_ASYM;
1653                 }
1654                 else {
1655                         adv = ADVERTISE_PAUSE_ASYM;
1656                 }
1657         }
1658         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1659                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1660                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1661                 }
1662                 else {
1663                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1664                 }
1665         }
1666         return adv;
1667 }
1668
1669 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1670
/* Configure link parameters when the PHY is managed by the bootcode
 * (remote-PHY mode): build a speed/pause bitmask from either the
 * autoneg advertisement or the forced setting, pass it via shared
 * memory, and ask the firmware to apply it.  bp->phy_lock is dropped
 * around the firmware handshake (it can take time) and reacquired.
 * Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every configured speed/duplex. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Firmware handshake may poll; don't hold the PHY lock. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1729
/* Configure the SerDes (fiber) PHY from bp->autoneg / req_line_speed /
 * req_duplex / advertising.
 *
 * Called with bp->phy_lock held; the lock is dropped briefly when a
 * forced link-down settling delay is needed.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* A firmware-managed PHY is configured via the shmem mailbox
	 * instead of direct MII access.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggle 2.5G capability to match the requested speed; if
		 * the capability state had to change, force a link bounce
		 * so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the forced-2.5G control bit. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 looks like a 5709
				 * forced-speed BMCR bit — confirm against
				 * the register spec.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Withdraw all 1000X advertisement and
				 * restart autoneg so the partner sees the
				 * link drop before the new forced mode is
				 * written.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed on the wire; just re-resolve
			 * flow control and refresh the MAC link config.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1846
/* All fiber speeds to advertise via ethtool; 2.5G is included only when
 * the PHY is 2.5G-capable.  Expands against a local 'bp' in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds to advertise via ethtool. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII_ADVERTISE bits covering every 10/100 mode (CSMA bit included). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII_CTRL1000 bits covering both 1000 Mb modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1861
1862 static void
1863 bnx2_set_default_remote_link(struct bnx2 *bp)
1864 {
1865         u32 link;
1866
1867         if (bp->phy_port == PORT_TP)
1868                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1869         else
1870                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1871
1872         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1873                 bp->req_line_speed = 0;
1874                 bp->autoneg |= AUTONEG_SPEED;
1875                 bp->advertising = ADVERTISED_Autoneg;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1877                         bp->advertising |= ADVERTISED_10baseT_Half;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1879                         bp->advertising |= ADVERTISED_10baseT_Full;
1880                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1881                         bp->advertising |= ADVERTISED_100baseT_Half;
1882                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1883                         bp->advertising |= ADVERTISED_100baseT_Full;
1884                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1885                         bp->advertising |= ADVERTISED_1000baseT_Full;
1886                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1887                         bp->advertising |= ADVERTISED_2500baseX_Full;
1888         } else {
1889                 bp->autoneg = 0;
1890                 bp->advertising = 0;
1891                 bp->req_duplex = DUPLEX_FULL;
1892                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1893                         bp->req_line_speed = SPEED_10;
1894                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1895                                 bp->req_duplex = DUPLEX_HALF;
1896                 }
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1898                         bp->req_line_speed = SPEED_100;
1899                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1900                                 bp->req_duplex = DUPLEX_HALF;
1901                 }
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1903                         bp->req_line_speed = SPEED_1000;
1904                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1905                         bp->req_line_speed = SPEED_2500;
1906         }
1907 }
1908
1909 static void
1910 bnx2_set_default_link(struct bnx2 *bp)
1911 {
1912         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1913                 bnx2_set_default_remote_link(bp);
1914                 return;
1915         }
1916
1917         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1918         bp->req_line_speed = 0;
1919         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1920                 u32 reg;
1921
1922                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1923
1924                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1925                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1926                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1927                         bp->autoneg = 0;
1928                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1929                         bp->req_duplex = DUPLEX_FULL;
1930                 }
1931         } else
1932                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1933 }
1934
1935 static void
1936 bnx2_send_heart_beat(struct bnx2 *bp)
1937 {
1938         u32 msg;
1939         u32 addr;
1940
1941         spin_lock(&bp->indirect_lock);
1942         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1943         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1944         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1945         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1946         spin_unlock(&bp->indirect_lock);
1947 }
1948
/* Handle a link-status event from the firmware-managed PHY: decode the
 * BNX2_LINK_STATUS shmem word into bp's link/speed/duplex/flow-control
 * state and update the MAC accordingly.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The *HALF cases set duplex and then deliberately fall
		 * through to the matching *FULL case to pick up the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: use the requested setting when it is not
		 * fully autonegotiated, else take the negotiated result
		 * reported by the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the media type changed, reload the port defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2025
2026 static int
2027 bnx2_set_remote_link(struct bnx2 *bp)
2028 {
2029         u32 evt_code;
2030
2031         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2032         switch (evt_code) {
2033                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2034                         bnx2_remote_phy_event(bp);
2035                         break;
2036                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2037                 default:
2038                         bnx2_send_heart_beat(bp);
2039                         break;
2040         }
2041         return 0;
2042 }
2043
/* Configure the copper PHY from bp->autoneg / advertising /
 * req_line_speed / req_duplex.
 *
 * Called with bp->phy_lock held; the lock is dropped around the forced
 * link-down settling delay.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Build the desired advertisement registers and compare
		 * against the current ones; only touch the PHY when
		 * something actually changed or autoneg was off.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2142
2143 static int
2144 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2145 __releases(&bp->phy_lock)
2146 __acquires(&bp->phy_lock)
2147 {
2148         if (bp->loopback == MAC_LOOPBACK)
2149                 return 0;
2150
2151         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2152                 return bnx2_setup_serdes_phy(bp, port);
2153         }
2154         else {
2155                 return bnx2_setup_copper_phy(bp);
2156         }
2157 }
2158
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S uses banked registers: MII_BNX2_BLK_ADDR selects the block
 * before each group of accesses, and the IEEE-standard registers live
 * at an offset of 0x10 within the combo IEEE block.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Redirect the generic MII register numbers to their 5709S
	 * locations for the common read/write helpers.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode, disabling media auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address on the combo IEEE block for normal
	 * MII accesses.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2208
/* Initialize the 5708 SerDes PHY: fiber mode with auto-detect, PLL
 * early-lock detect, optional 2.5G advertisement, plus board-specific
 * TX amplitude/equalization fixups from NVRAM.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* 2.5G advertisement register location on this PHY. */
	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM, but only
	 * on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2266
/* Initialize the 5706 SerDes PHY, adjusting packet-length handling for
 * jumbo vs standard MTU.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 on 5706 only —
	 * purpose not documented here; confirm against the chip spec.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		/* Registers 0x18/0x1c are PHY shadow registers — the
		 * first write selects the shadow, the read-modify-write
		 * updates it.  Values are presumably vendor-specified;
		 * confirm against the PHY data sheet.
		 */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2304
/* Initialize the copper PHY: apply the CRC and early-DAC workarounds
 * when flagged, size packet-length handling to the MTU, and enable
 * ethernet@wirespeed.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-specified DSP write sequence for the CRC fix;
		 * registers 0x15/0x17/0x18 are PHY expansion/shadow
		 * registers — values per Broadcom, do not reorder.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expansion register 8 to disable
		 * early DAC.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2356
2357
2358 static int
2359 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2360 __releases(&bp->phy_lock)
2361 __acquires(&bp->phy_lock)
2362 {
2363         u32 val;
2364         int rc = 0;
2365
2366         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2367         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2368
2369         bp->mii_bmcr = MII_BMCR;
2370         bp->mii_bmsr = MII_BMSR;
2371         bp->mii_bmsr1 = MII_BMSR;
2372         bp->mii_adv = MII_ADVERTISE;
2373         bp->mii_lpa = MII_LPA;
2374
2375         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2376
2377         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2378                 goto setup_phy;
2379
2380         bnx2_read_phy(bp, MII_PHYSID1, &val);
2381         bp->phy_id = val << 16;
2382         bnx2_read_phy(bp, MII_PHYSID2, &val);
2383         bp->phy_id |= val & 0xffff;
2384
2385         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2386                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2387                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2388                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2389                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2390                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2391                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2392         }
2393         else {
2394                 rc = bnx2_init_copper_phy(bp, reset_phy);
2395         }
2396
2397 setup_phy:
2398         if (!rc)
2399                 rc = bnx2_setup_phy(bp, bp->phy_port);
2400
2401         return rc;
2402 }
2403
2404 static int
2405 bnx2_set_mac_loopback(struct bnx2 *bp)
2406 {
2407         u32 mac_mode;
2408
2409         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2410         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2411         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2412         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2413         bp->link_up = 1;
2414         return 0;
2415 }
2416
2417 static int bnx2_test_link(struct bnx2 *);
2418
2419 static int
2420 bnx2_set_phy_loopback(struct bnx2 *bp)
2421 {
2422         u32 mac_mode;
2423         int rc, i;
2424
2425         spin_lock_bh(&bp->phy_lock);
2426         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2427                             BMCR_SPEED1000);
2428         spin_unlock_bh(&bp->phy_lock);
2429         if (rc)
2430                 return rc;
2431
2432         for (i = 0; i < 10; i++) {
2433                 if (bnx2_test_link(bp) == 0)
2434                         break;
2435                 msleep(100);
2436         }
2437
2438         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2439         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2440                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2441                       BNX2_EMAC_MODE_25G_MODE);
2442
2443         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2444         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445         bp->link_up = 1;
2446         return 0;
2447 }
2448
/* Dump management-CPU (MCP) and shared-memory state to the kernel log
 * for firmware-hang diagnosis.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers moved on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is read twice — presumably to show whether
	 * the MCP is advancing or stuck; confirm intent if changing.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2490
/* Post a driver message to the bootcode firmware via the shared-memory
 * mailbox and, when @ack is set, poll for the firmware acknowledgement.
 *
 * Returns 0 on success (or when no ack was requested, or for WAIT0
 * messages), -EBUSY when the firmware does not ack within
 * BNX2_FW_ACK_TIME_OUT_MS, and -EIO when it acks with a non-OK status.
 * @silent suppresses the timeout log and MCP state dump.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number so the firmware's
	 * echo in FW_MB can be matched to this particular request. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* Firmware echoes the sequence number when it is done. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are best-effort; don't treat a missing ack as
	 * an error. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	/* Acked, but the firmware reported a failure status. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2536
/* Initialize the 5709 context memory: kick the hardware context-memory
 * init, then program the host page table with the DMA addresses of the
 * pre-allocated context pages (bp->ctx_blk[]).
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time, -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit 12 is set alongside ENABLED/MEM_INIT but has
	 * no named constant here — presumably a chip-specific flag;
	 * confirm against the 5709 programming manual. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Page size encoded as log2(page size) - 8 in bits 16+. */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* The chip clears MEM_INIT when the init completes; poll for it. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages are allocated elsewhere; zero each one before
		 * handing it to the chip. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Load the 64-bit DMA address of page i into the host
		 * page table data registers, issue the write request,
		 * and poll until the chip clears WRITE_REQ. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2584
/* Zero the on-chip context memory for all 96 connection IDs (the 5709
 * keeps its context in host memory instead — see
 * bnx2_init_5709_context()).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 maps certain virtual CIDs (bit 3 set)
			 * to remapped physical CIDs — presumably a chip
			 * erratum workaround; TODO confirm. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context is CTX_SIZE bytes, accessed through a
		 * PHY_CTX_SIZE-sized window mapped one piece at a time. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2627
/* Work around bad on-chip RX mbuf memory blocks: drain the firmware
 * mbuf pool by allocating every free mbuf, remember the good ones
 * (those without bit 9 set in their address), then free only the good
 * ones back — permanently removing the bad blocks from circulation.
 *
 * Returns 0 on success or -ENOMEM if the scratch array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries — assumed to be at least the pool size; TODO
	 * confirm against the RBUF pool dimensions. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Pack the mbuf address into the FW_BUF_FREE command
		 * format (NOTE(review): encoding taken on faith from the
		 * original code; confirm against chip docs). */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2678
2679 static void
2680 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2681 {
2682         u32 val;
2683
2684         val = (mac_addr[0] << 8) | mac_addr[1];
2685
2686         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2687
2688         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2689                 (mac_addr[4] << 8) | mac_addr[5];
2690
2691         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2692 }
2693
2694 static inline int
2695 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2696 {
2697         dma_addr_t mapping;
2698         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2699         struct rx_bd *rxbd =
2700                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2701         struct page *page = alloc_page(gfp);
2702
2703         if (!page)
2704                 return -ENOMEM;
2705         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2706                                PCI_DMA_FROMDEVICE);
2707         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2708                 __free_page(page);
2709                 return -EIO;
2710         }
2711
2712         rx_pg->page = page;
2713         dma_unmap_addr_set(rx_pg, mapping, mapping);
2714         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2715         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2716         return 0;
2717 }
2718
2719 static void
2720 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2721 {
2722         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2723         struct page *page = rx_pg->page;
2724
2725         if (!page)
2726                 return;
2727
2728         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2729                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2730
2731         __free_page(page);
2732         rx_pg->page = NULL;
2733 }
2734
2735 static inline int
2736 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2737 {
2738         struct sk_buff *skb;
2739         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2740         dma_addr_t mapping;
2741         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2742         unsigned long align;
2743
2744         skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2745         if (skb == NULL) {
2746                 return -ENOMEM;
2747         }
2748
2749         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2750                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2751
2752         mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2753                                  PCI_DMA_FROMDEVICE);
2754         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2755                 dev_kfree_skb(skb);
2756                 return -EIO;
2757         }
2758
2759         rx_buf->skb = skb;
2760         rx_buf->desc = (struct l2_fhdr *) skb->data;
2761         dma_unmap_addr_set(rx_buf, mapping, mapping);
2762
2763         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2764         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2765
2766         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2767
2768         return 0;
2769 }
2770
2771 static int
2772 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2773 {
2774         struct status_block *sblk = bnapi->status_blk.msi;
2775         u32 new_link_state, old_link_state;
2776         int is_set = 1;
2777
2778         new_link_state = sblk->status_attn_bits & event;
2779         old_link_state = sblk->status_attn_bits_ack & event;
2780         if (new_link_state != old_link_state) {
2781                 if (new_link_state)
2782                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2783                 else
2784                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2785         } else
2786                 is_set = 0;
2787
2788         return is_set;
2789 }
2790
2791 static void
2792 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2793 {
2794         spin_lock(&bp->phy_lock);
2795
2796         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2797                 bnx2_set_link(bp);
2798         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2799                 bnx2_set_remote_link(bp);
2800
2801         spin_unlock(&bp->phy_lock);
2802
2803 }
2804
2805 static inline u16
2806 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2807 {
2808         u16 cons;
2809
2810         /* Tell compiler that status block fields can change. */
2811         barrier();
2812         cons = *bnapi->hw_tx_cons_ptr;
2813         barrier();
2814         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2815                 cons++;
2816         return cons;
2817 }
2818
/* Reclaim completed tx packets for one NAPI instance: unmap and free
 * every skb the hardware has finished sending, up to @budget packets,
 * then wake the tx queue if it was stopped and enough descriptors are
 * free again.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance services the tx queue of the same index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index just past the packet's last BD; account
			 * for the unusable final ring entry if the
			 * packet wraps over it. */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap: stop
			 * reclaiming if the hardware has not consumed the
			 * whole packet yet. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each fragment page. */
		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up any completions that arrived while reclaiming. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to close the race with a
		 * concurrent bnx2_start_xmit() stopping the queue. */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2909
/* Recycle @count rx page-ring entries from the consumer side back to
 * the producer side after a page allocation failure, keeping the
 * hardware ring fully populated.  If @skb is non-NULL, its last
 * fragment page (the one whose replacement could not be allocated) is
 * recycled as well and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last fragment page from the skb and park it
		 * in the consumer slot so the loop below recycles it. */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page, its DMA mapping and descriptor address
		 * from the consumer slot to the producer slot. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2965
/* Recycle an rx buffer whose replacement could not be allocated: move
 * the skb and its DMA mapping from consumer slot @cons to producer
 * slot @prod so the hardware can fill it again.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header area (synced for the CPU in bnx2_rx_int())
	 * back to the device. */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	/* Same slot: mapping and descriptor address are already valid. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2996
/* Finish receiving one packet into @skb: refill the producer slot,
 * unmap the skb data, and — for split/jumbo packets — attach the
 * payload pages from the page ring as skb fragments.
 *
 * @len is the packet length including the 4-byte trailer (frame CRC —
 * see the "len -= 4" in bnx2_rx_int()); @hdr_len is non-zero when the
 * payload continues in the page ring; @ring_idx packs the consumer
 * index in the high 16 bits and the producer index in the low 16 bits.
 * Returns 0 on success or a negative errno; on failure all buffers are
 * recycled and the skb must not be used by the caller.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the producer slot first; if that fails, recycle this
	 * buffer (and any page-ring entries) and bail out. */
	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: everything is already in the skb head. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Payload beyond hdr_len (plus the 4-byte trailer) lives
		 * in the page ring. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The remaining bytes are trailer only: trim any
			 * trailer bytes already counted in the skb and
			 * recycle the unused pages. */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4; /* drop the 4-byte trailer */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Replace the page we just consumed. */
			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3096
3097 static inline u16
3098 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3099 {
3100         u16 cons;
3101
3102         /* Tell compiler that status block fields can change. */
3103         barrier();
3104         cons = *bnapi->hw_rx_cons_ptr;
3105         barrier();
3106         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3107                 cons++;
3108         return cons;
3109 }
3110
/* NAPI rx handler: process up to @budget completed rx packets on this
 * ring, hand them to the stack, and publish the new producer indices
 * to the hardware.  Returns the number of packets received.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* Warm the cache for the next completion's header. */
		next_rx_buf =
			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
		prefetch(next_rx_buf->desc);

		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only the l2_fhdr plus the copy-threshold header area
		 * needs to be CPU-visible at this point. */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip writes an l2_fhdr at the head of the buffer. */
		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/payload split: the ip_xsum field carries
			 * the split header length here (NOTE(review):
			 * field reuse — confirm against chip docs). */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop frames with hardware-detected errors, recycling
		 * their buffers back to the rings. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte trailer (frame CRC) from pkt_len. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small packet: copy into a fresh skb and recycle
			 * the original buffer, avoiding a remap. */
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results when RXCSUM is on. */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		/* Propagate the hardware-computed flow hash when valid. */
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer positions. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	/* Order the posted MMIO index writes before returning. */
	mmiowb();

	return rx_pkt;

}
3271
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 *
 * Acks the interrupt (masking further HC interrupts) and hands the
 * rest of the work to NAPI.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Warm the cache line holding the status block before NAPI reads it. */
	prefetch(bnapi->status_blk.msi);
	/* Ack and mask; NAPI completion re-enables interrupts later. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3294
/* One-shot MSI ISR - no explicit ack/mask register write here, unlike
 * bnx2_msi(); just prefetch the status block and kick NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3311
/* INTx ISR - may share the interrupt line, so it must detect whether
 * the interrupt actually belongs to this device before servicing it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt */

	/* Ack and mask further interrupts until NAPI completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		/* Record the index being acked so the poller can tell
		 * whether new events arrive while it runs.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3350
3351 static inline int
3352 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3353 {
3354         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3355         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3356
3357         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3358             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3359                 return 1;
3360         return 0;
3361 }
3362
/* Attention events the driver services: link state changes and firmware
 * timer aborts.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return 1 if the status block shows any pending work: fast-path RX/TX
 * completions, unconsumed CNIC events, or unacknowledged attention bits.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* cnic_tag trails status_idx when the CNIC handler has not yet
	 * seen the latest status block update.
	 */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* An attention bit differing from its ack copy means that event
	 * has not been serviced yet.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3385
/* Workaround for missed MSIs: if work is pending but the status index
 * has not advanced since the previous idle check, assume the MSI edge
 * was lost; pulse the MSI enable bit and service the interrupt by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off then back on, then run
			 * the MSI handler directly.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3407
#ifdef BCM_CNIC
/* Hand the current status block to the registered CNIC driver (if any)
 * and record the status index it consumed in bnapi->cnic_tag.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops may be unregistered concurrently; RCU protects the
	 * dereference.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3424
/* Service link/attention events flagged in the status block by comparing
 * the attention bits against their acknowledged copies.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */
	}
}
3444
3445 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3446                           int work_done, int budget)
3447 {
3448         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3449         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3450
3451         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3452                 bnx2_tx_int(bp, bnapi, 0);
3453
3454         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3455                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3456
3457         return work_done;
3458 }
3459
/* NAPI poll handler for non-zero MSI-X vectors: fast-path RX/TX only
 * (link and CNIC events are handled by bnx2_poll on vector 0).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: re-arm this vector, acking up to
			 * last_status_idx.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3486
/* NAPI poll handler for INTx/MSI (single vector): services link events,
 * fast-path RX/TX, and CNIC work until the budget is exhausted or no
 * work remains.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single ack write re-enables
				 * interrupts.
				 */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with MASK_INT still set, then
			 * write again without it to unmask.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3535
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's RX filters (promiscuous mode, VLAN tag keeping,
 * multicast hash, unicast exact-match slots) from the net_device flags
 * and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock serializes these filter register updates. */
	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep the VLAN tag in the frame when HW VLAN stripping is off. */
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(ha, dev) {
			/* Low CRC byte selects one of 256 hash bits:
			 * top 3 bits pick the register, low 5 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for the exact-match slots: fall
	 * back to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear the sort register, load the new mode, then enable it. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3624
3625 static int __devinit
3626 check_fw_section(const struct firmware *fw,
3627                  const struct bnx2_fw_file_section *section,
3628                  u32 alignment, bool non_empty)
3629 {
3630         u32 offset = be32_to_cpu(section->offset);
3631         u32 len = be32_to_cpu(section->len);
3632
3633         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3634                 return -EINVAL;
3635         if ((non_empty && len == 0) || len > fw->size - offset ||
3636             len & (alignment - 1))
3637                 return -EINVAL;
3638         return 0;
3639 }
3640
3641 static int __devinit
3642 check_mips_fw_entry(const struct firmware *fw,
3643                     const struct bnx2_mips_fw_file_entry *entry)
3644 {
3645         if (check_fw_section(fw, &entry->text, 4, true) ||
3646             check_fw_section(fw, &entry->data, 4, false) ||
3647             check_fw_section(fw, &entry->rodata, 4, false))
3648                 return -EINVAL;
3649         return 0;
3650 }
3651
3652 static int __devinit
3653 bnx2_request_firmware(struct bnx2 *bp)
3654 {
3655         const char *mips_fw_file, *rv2p_fw_file;
3656         const struct bnx2_mips_fw_file *mips_fw;
3657         const struct bnx2_rv2p_fw_file *rv2p_fw;
3658         int rc;
3659
3660         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3661                 mips_fw_file = FW_MIPS_FILE_09;
3662                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3663                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3664                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3665                 else
3666                         rv2p_fw_file = FW_RV2P_FILE_09;
3667         } else {
3668                 mips_fw_file = FW_MIPS_FILE_06;
3669                 rv2p_fw_file = FW_RV2P_FILE_06;
3670         }
3671
3672         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3673         if (rc) {
3674                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3675                 return rc;
3676         }
3677
3678         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3679         if (rc) {
3680                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3681                 return rc;
3682         }
3683         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3684         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3685         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3686             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3687             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3688             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3689             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3690             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3691                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3692                 return -EINVAL;
3693         }
3694         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3695             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3696             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3697                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3698                 return -EINVAL;
3699         }
3700
3701         return 0;
3702 }
3703
3704 static u32
3705 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3706 {
3707         switch (idx) {
3708         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3709                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3710                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3711                 break;
3712         }
3713         return rv2p_code;
3714 }
3715
/* Download one RV2P processor image into the chip, apply its fixup
 * table, and leave the processor held in reset (un-stalled later).
 * Returns 0 (always succeeds).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own instruction address/command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the code 8 bytes (one instruction: HIGH then LOW word)
	 * at a time, committing each at index i/8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-walk the image and rewrite the instructions listed in the
	 * fixup table with rv2p_fw_fixup() applied to the low word.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3775
3776 static int
3777 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3778             const struct bnx2_mips_fw_file_entry *fw_entry)
3779 {
3780         u32 addr, len, file_offset;
3781         __be32 *data;
3782         u32 offset;
3783         u32 val;
3784
3785         /* Halt the CPU. */
3786         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3787         val |= cpu_reg->mode_value_halt;
3788         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3789         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3790
3791         /* Load the Text area. */
3792         addr = be32_to_cpu(fw_entry->text.addr);
3793         len = be32_to_cpu(fw_entry->text.len);
3794         file_offset = be32_to_cpu(fw_entry->text.offset);
3795         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3796
3797         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3798         if (len) {
3799                 int j;
3800
3801                 for (j = 0; j < (len / 4); j++, offset += 4)
3802                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3803         }
3804
3805         /* Load the Data area. */
3806         addr = be32_to_cpu(fw_entry->data.addr);
3807         len = be32_to_cpu(fw_entry->data.len);
3808         file_offset = be32_to_cpu(fw_entry->data.offset);
3809         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3810
3811         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3812         if (len) {
3813                 int j;
3814
3815                 for (j = 0; j < (len / 4); j++, offset += 4)
3816                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3817         }
3818
3819         /* Load the Read-Only area. */
3820         addr = be32_to_cpu(fw_entry->rodata.addr);
3821         len = be32_to_cpu(fw_entry->rodata.len);
3822         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3823         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3824
3825         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3826         if (len) {
3827                 int j;
3828
3829                 for (j = 0; j < (len / 4); j++, offset += 4)
3830                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3831         }
3832
3833         /* Clear the pre-fetch instruction. */
3834         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3835
3836         val = be32_to_cpu(fw_entry->start_addr);
3837         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3838
3839         /* Start the CPU. */
3840         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3841         val &= ~cpu_reg->mode_value_halt;
3842         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3843         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3844
3845         return 0;
3846 }
3847
3848 static int
3849 bnx2_init_cpus(struct bnx2 *bp)
3850 {
3851         const struct bnx2_mips_fw_file *mips_fw =
3852                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3853         const struct bnx2_rv2p_fw_file *rv2p_fw =
3854                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3855         int rc;
3856
3857         /* Initialize the RV2P processor. */
3858         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3859         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3860
3861         /* Initialize the RX Processor. */
3862         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3863         if (rc)
3864                 goto init_cpu_err;
3865
3866         /* Initialize the TX Processor. */
3867         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3868         if (rc)
3869                 goto init_cpu_err;
3870
3871         /* Initialize the TX Patch-up Processor. */
3872         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3873         if (rc)
3874                 goto init_cpu_err;
3875
3876         /* Initialize the Completion Processor. */
3877         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3878         if (rc)
3879                 goto init_cpu_err;
3880
3881         /* Initialize the Command Processor. */
3882         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3883
3884 init_cpu_err:
3885         return rc;
3886 }
3887
/* Transition the chip between PCI power states.
 *
 * D0: clear PME status, wait out the D3hot exit delay if needed, and
 *     disable the magic/ACPI packet matching used while suspended.
 * D3hot: when WOL is enabled, renegotiate a low link speed (copper
 *     only), program the MAC for magic-packet reception, notify the
 *     firmware, then write PMCSR to enter D3hot.
 *
 * Returns 0 on success or -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Back to D0 and clear (write-1) the PME status bit. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear magic/ACPI packet detection left over from suspend. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily advertise only 10/100 on copper so
			 * the link renegotiates down for low-power WOL;
			 * the original settings are restored below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Clear, load, then enable the sort register. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (with or without WOL). */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Set the PM state field to D3hot (3); 5706 A0/A1 only
		 * enter D3hot when WOL is enabled.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4025
4026 static int
4027 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4028 {
4029         u32 val;
4030         int j;
4031
4032         /* Request access to the flash interface. */
4033         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4034         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4035                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4036                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4037                         break;
4038
4039                 udelay(5);
4040         }
4041
4042         if (j >= NVRAM_TIMEOUT_COUNT)
4043                 return -EBUSY;
4044
4045         return 0;
4046 }
4047
4048 static int
4049 bnx2_release_nvram_lock(struct bnx2 *bp)
4050 {
4051         int j;
4052         u32 val;
4053
4054         /* Relinquish nvram interface. */
4055         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4056
4057         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4058                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4059                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4060                         break;
4061
4062                 udelay(5);
4063         }
4064
4065         if (j >= NVRAM_TIMEOUT_COUNT)
4066                 return -EBUSY;
4067
4068         return 0;
4069 }
4070
4071
4072 static int
4073 bnx2_enable_nvram_write(struct bnx2 *bp)
4074 {
4075         u32 val;
4076
4077         val = REG_RD(bp, BNX2_MISC_CFG);
4078         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4079
4080         if (bp->flash_info->flags & BNX2_NV_WREN) {
4081                 int j;
4082
4083                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4084                 REG_WR(bp, BNX2_NVM_COMMAND,
4085                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4086
4087                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4088                         udelay(5);
4089
4090                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4091                         if (val & BNX2_NVM_COMMAND_DONE)
4092                                 break;
4093                 }
4094
4095                 if (j >= NVRAM_TIMEOUT_COUNT)
4096                         return -EBUSY;
4097         }
4098         return 0;
4099 }
4100
4101 static void
4102 bnx2_disable_nvram_write(struct bnx2 *bp)
4103 {
4104         u32 val;
4105
4106         val = REG_RD(bp, BNX2_MISC_CFG);
4107         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4108 }
4109
4110
4111 static void
4112 bnx2_enable_nvram_access(struct bnx2 *bp)
4113 {
4114         u32 val;
4115
4116         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4117         /* Enable both bits, even on read. */
4118         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4119                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4120 }
4121
4122 static void
4123 bnx2_disable_nvram_access(struct bnx2 *bp)
4124 {
4125         u32 val;
4126
4127         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4128         /* Disable both bits, even after read. */
4129         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4130                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4131                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4132 }
4133
4134 static int
4135 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4136 {
4137         u32 cmd;
4138         int j;
4139
4140         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4141                 /* Buffered flash, no erase needed */
4142                 return 0;
4143
4144         /* Build an erase command */
4145         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4146               BNX2_NVM_COMMAND_DOIT;
4147
4148         /* Need to clear DONE bit separately. */
4149         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4150
4151         /* Address of the NVRAM to read from. */
4152         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4153
4154         /* Issue an erase command. */
4155         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4156
4157         /* Wait for completion. */
4158         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4159                 u32 val;
4160
4161                 udelay(5);
4162
4163                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4164                 if (val & BNX2_NVM_COMMAND_DONE)
4165                         break;
4166         }
4167
4168         if (j >= NVRAM_TIMEOUT_COUNT)
4169                 return -EBUSY;
4170
4171         return 0;
4172 }
4173
/* Read one 32-bit word of NVRAM at byte offset @offset into @ret_val
 * (4 bytes, in flash/big-endian byte order).  For parts flagged
 * BNX2_NV_TRANSLATE the linear offset is converted to the
 * page-number/page-offset form the controller expects.  @cmd_flags
 * carries the FIRST/LAST framing bits for multi-word transactions.
 * Returns 0 on success, -EBUSY if DONE is never reported.  Callers
 * hold the NVRAM lock with access enabled (see bnx2_nvram_read()).
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Copy out via a __be32 temp so the bytes land in
			 * flash order regardless of host endianness. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4217
4218
/* Write one 32-bit word (@val, 4 bytes in flash byte order) to NVRAM
 * at byte offset @offset.  For parts flagged BNX2_NV_TRANSLATE the
 * linear offset is converted to the controller's page/offset form.
 * @cmd_flags carries the FIRST/LAST framing bits for multi-word
 * sequences.  Returns 0 on success, -EBUSY if DONE never appears.
 * Callers hold the NVRAM lock with access and write both enabled
 * (see bnx2_nvram_write()).
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Stage through a __be32 so the register sees the bytes in the
	 * same order they occupy in the caller's buffer. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4262
/* Identify the attached flash/EEPROM and set bp->flash_info and
 * bp->flash_size.  The 5709 always uses the fixed flash_5709
 * descriptor; other chips are matched against flash_table by the
 * strapping bits read from BNX2_NVM_CFG1.  If the strap shows the
 * flash interface has not been reconfigured yet, the matching entry's
 * config registers are programmed under the NVRAM lock.  Returns 0 on
 * success, -ENODEV if no table entry matches, or the error from
 * bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set: interface already reconfigured, match on config1. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap field. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j == entry_count means neither loop found a match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared config; fall back to the
	 * table entry's total_size when the firmware reports none. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4345
4346 static int
4347 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4348                 int buf_size)
4349 {
4350         int rc = 0;
4351         u32 cmd_flags, offset32, len32, extra;
4352
4353         if (buf_size == 0)
4354                 return 0;
4355
4356         /* Request access to the flash interface. */
4357         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4358                 return rc;
4359
4360         /* Enable access to flash interface */
4361         bnx2_enable_nvram_access(bp);
4362
4363         len32 = buf_size;
4364         offset32 = offset;
4365         extra = 0;
4366
4367         cmd_flags = 0;
4368
4369         if (offset32 & 3) {
4370                 u8 buf[4];
4371                 u32 pre_len;
4372
4373                 offset32 &= ~3;
4374                 pre_len = 4 - (offset & 3);
4375
4376                 if (pre_len >= len32) {
4377                         pre_len = len32;
4378                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4379                                     BNX2_NVM_COMMAND_LAST;
4380                 }
4381                 else {
4382                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4383                 }
4384
4385                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4386
4387                 if (rc)
4388                         return rc;
4389
4390                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4391
4392                 offset32 += 4;
4393                 ret_buf += pre_len;
4394                 len32 -= pre_len;
4395         }
4396         if (len32 & 3) {
4397                 extra = 4 - (len32 & 3);
4398                 len32 = (len32 + 4) & ~3;
4399         }
4400
4401         if (len32 == 4) {
4402                 u8 buf[4];
4403
4404                 if (cmd_flags)
4405                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4406                 else
4407                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4408                                     BNX2_NVM_COMMAND_LAST;
4409
4410                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4411
4412                 memcpy(ret_buf, buf, 4 - extra);
4413         }
4414         else if (len32 > 0) {
4415                 u8 buf[4];
4416
4417                 /* Read the first word. */
4418                 if (cmd_flags)
4419                         cmd_flags = 0;
4420                 else
4421                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4422
4423                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4424
4425                 /* Advance to the next dword. */
4426                 offset32 += 4;
4427                 ret_buf += 4;
4428                 len32 -= 4;
4429
4430                 while (len32 > 4 && rc == 0) {
4431                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4432
4433                         /* Advance to the next dword. */
4434                         offset32 += 4;
4435                         ret_buf += 4;
4436                         len32 -= 4;
4437                 }
4438
4439                 if (rc)
4440                         return rc;
4441
4442                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4443                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4444
4445                 memcpy(ret_buf, buf, 4 - extra);
4446         }
4447
4448         /* Disable access to flash interface */
4449         bnx2_disable_nvram_access(bp);
4450
4451         bnx2_release_nvram_lock(bp);
4452
4453         return rc;
4454 }
4455
4456 static int
4457 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4458                 int buf_size)
4459 {
4460         u32 written, offset32, len32;
4461         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4462         int rc = 0;
4463         int align_start, align_end;
4464
4465         buf = data_buf;
4466         offset32 = offset;
4467         len32 = buf_size;
4468         align_start = align_end = 0;
4469
4470         if ((align_start = (offset32 & 3))) {
4471                 offset32 &= ~3;
4472                 len32 += align_start;
4473                 if (len32 < 4)
4474                         len32 = 4;
4475                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4476                         return rc;
4477         }
4478
4479         if (len32 & 3) {
4480                 align_end = 4 - (len32 & 3);
4481                 len32 += align_end;
4482                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4483                         return rc;
4484         }
4485
4486         if (align_start || align_end) {
4487                 align_buf = kmalloc(len32, GFP_KERNEL);
4488                 if (align_buf == NULL)
4489                         return -ENOMEM;
4490                 if (align_start) {
4491                         memcpy(align_buf, start, 4);
4492                 }
4493                 if (align_end) {
4494                         memcpy(align_buf + len32 - 4, end, 4);
4495                 }
4496                 memcpy(align_buf + align_start, data_buf, buf_size);
4497                 buf = align_buf;
4498         }
4499
4500         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4501                 flash_buffer = kmalloc(264, GFP_KERNEL);
4502                 if (flash_buffer == NULL) {
4503                         rc = -ENOMEM;
4504                         goto nvram_write_end;
4505                 }
4506         }
4507
4508         written = 0;
4509         while ((written < len32) && (rc == 0)) {
4510                 u32 page_start, page_end, data_start, data_end;
4511                 u32 addr, cmd_flags;
4512                 int i;
4513
4514                 /* Find the page_start addr */
4515                 page_start = offset32 + written;
4516                 page_start -= (page_start % bp->flash_info->page_size);
4517                 /* Find the page_end addr */
4518                 page_end = page_start + bp->flash_info->page_size;
4519                 /* Find the data_start addr */
4520                 data_start = (written == 0) ? offset32 : page_start;
4521                 /* Find the data_end addr */
4522                 data_end = (page_end > offset32 + len32) ?
4523                         (offset32 + len32) : page_end;
4524
4525                 /* Request access to the flash interface. */
4526                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4527                         goto nvram_write_end;
4528
4529                 /* Enable access to flash interface */
4530                 bnx2_enable_nvram_access(bp);
4531
4532                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4533                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4534                         int j;
4535
4536                         /* Read the whole page into the buffer
4537                          * (non-buffer flash only) */
4538                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4539                                 if (j == (bp->flash_info->page_size - 4)) {
4540                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4541                                 }
4542                                 rc = bnx2_nvram_read_dword(bp,
4543                                         page_start + j,
4544                                         &flash_buffer[j],
4545                                         cmd_flags);
4546
4547                                 if (rc)
4548                                         goto nvram_write_end;
4549
4550                                 cmd_flags = 0;
4551                         }
4552                 }
4553
4554                 /* Enable writes to flash interface (unlock write-protect) */
4555                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4556                         goto nvram_write_end;
4557
4558                 /* Loop to write back the buffer data from page_start to
4559                  * data_start */
4560                 i = 0;
4561                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4562                         /* Erase the page */
4563                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4564                                 goto nvram_write_end;
4565
4566                         /* Re-enable the write again for the actual write */
4567                         bnx2_enable_nvram_write(bp);
4568
4569                         for (addr = page_start; addr < data_start;
4570                                 addr += 4, i += 4) {
4571
4572                                 rc = bnx2_nvram_write_dword(bp, addr,
4573                                         &flash_buffer[i], cmd_flags);
4574
4575                                 if (rc != 0)
4576                                         goto nvram_write_end;
4577
4578                                 cmd_flags = 0;
4579                         }
4580                 }
4581
4582                 /* Loop to write the new data from data_start to data_end */
4583                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4584                         if ((addr == page_end - 4) ||
4585                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4586                                  (addr == data_end - 4))) {
4587
4588                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4589                         }
4590                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4591                                 cmd_flags);
4592
4593                         if (rc != 0)
4594                                 goto nvram_write_end;
4595
4596                         cmd_flags = 0;
4597                         buf += 4;
4598                 }
4599
4600                 /* Loop to write back the buffer data from data_end
4601                  * to page_end */
4602                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4603                         for (addr = data_end; addr < page_end;
4604                                 addr += 4, i += 4) {
4605
4606                                 if (addr == page_end-4) {
4607                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4608                                 }
4609                                 rc = bnx2_nvram_write_dword(bp, addr,
4610                                         &flash_buffer[i], cmd_flags);
4611
4612                                 if (rc != 0)
4613                                         goto nvram_write_end;
4614
4615                                 cmd_flags = 0;
4616                         }
4617                 }
4618
4619                 /* Disable writes to flash interface (lock write-protect) */
4620                 bnx2_disable_nvram_write(bp);
4621
4622                 /* Disable access to flash interface */
4623                 bnx2_disable_nvram_access(bp);
4624                 bnx2_release_nvram_lock(bp);
4625
4626                 /* Increment written */
4627                 written += data_end - data_start;
4628         }
4629
4630 nvram_write_end:
4631         kfree(flash_buffer);
4632         kfree(align_buf);
4633         return rc;
4634 }
4635
/* Negotiate optional capabilities with the bootcode via shared
 * memory.  Reads BNX2_FW_CAP_MB and, when the firmware signature
 * matches, records "can keep VLAN tags" in bp->flags and remote-PHY
 * support in bp->phy_flags/bp->phy_port, then acknowledges the
 * accepted capabilities through BNX2_DRV_ACK_CAP_MB (only while the
 * netdev is running).  Called with bp->phy_lock held by
 * bnx2_reset_chip().
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; capabilities are re-derived below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware, VLAN tags can always be kept. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Derive the port type from the firmware link status. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4675
/* Map the MSI-X vector table and PBA through the PCI GRC windows:
 * select separate-window mode first, then point windows 2 and 3 at
 * the table and PBA addresses so the host can reach them via the
 * register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4684
/* Perform a chip soft reset and wait for the bootcode to come back
 * up.  @reset_code is the BNX2_DRV_MSG_CODE_* reason, passed to the
 * firmware both before the reset (WAIT0) and while waiting for its
 * re-initialization (WAIT1).  Returns 0 on success, -EBUSY if the
 * reset never completes, -ENODEV if the byte-swap diagnostic reads
 * back wrong, or an error from bnx2_fw_sync()/bnx2_alloc_bad_rbuf().
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		/* 5706/5708: quiesce DMA and host coalescing directly. */
		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* 5709: drop the core DMA enable and poll the PCI config
		 * device status until no transactions are pending
		 * (up to ~100 ms). */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC_COMMAND register rather than
		 * the PCICFG core-reset request used by older chips. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; a reset may have changed the
	 * remote-PHY port type, which requires re-deriving the default
	 * remote link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4809
4810 static int
4811 bnx2_init_chip(struct bnx2 *bp)
4812 {
4813         u32 val, mtu;
4814         int rc, i;
4815
4816         /* Make sure the interrupt is not active. */
4817         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4818
4819         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4820               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4821 #ifdef __BIG_ENDIAN
4822               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4823 #endif
4824               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4825               DMA_READ_CHANS << 12 |
4826               DMA_WRITE_CHANS << 16;
4827
4828         val |= (0x2 << 20) | (1 << 11);
4829
4830         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4831                 val |= (1 << 23);
4832
4833         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4834             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4835                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4836
4837         REG_WR(bp, BNX2_DMA_CONFIG, val);
4838
4839         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4840                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4841                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4842                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4843         }
4844
4845         if (bp->flags & BNX2_FLAG_PCIX) {
4846                 u16 val16;
4847
4848                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4849                                      &val16);
4850                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4851                                       val16 & ~PCI_X_CMD_ERO);
4852         }
4853
4854         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4855                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4856                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4857                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4858
4859         /* Initialize context mapping and zero out the quick contexts.  The
4860          * context block must have already been enabled. */
4861         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4862                 rc = bnx2_init_5709_context(bp);
4863                 if (rc)
4864                         return rc;
4865         } else
4866                 bnx2_init_context(bp);
4867
4868         if ((rc = bnx2_init_cpus(bp)) != 0)
4869                 return rc;
4870
4871         bnx2_init_nvram(bp);
4872
4873         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4874
4875         val = REG_RD(bp, BNX2_MQ_CONFIG);
4876         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4877         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4878         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4879                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4880                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4881                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4882         }
4883
4884         REG_WR(bp, BNX2_MQ_CONFIG, val);
4885
4886         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4887         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4888         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4889
4890         val = (BCM_PAGE_BITS - 8) << 24;
4891         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4892
4893         /* Configure page size. */
4894         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4895         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4896         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4897         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4898
4899         val = bp->mac_addr[0] +
4900               (bp->mac_addr[1] << 8) +
4901               (bp->mac_addr[2] << 16) +
4902               bp->mac_addr[3] +
4903               (bp->mac_addr[4] << 8) +
4904               (bp->mac_addr[5] << 16);
4905         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4906
4907         /* Program the MTU.  Also include 4 bytes for CRC32. */
4908         mtu = bp->dev->mtu;
4909         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4910         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4911                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4912         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4913
4914         if (mtu < 1500)
4915                 mtu = 1500;
4916
4917         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4918         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4919         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4920
4921         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4922         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4923                 bp->bnx2_napi[i].last_status_idx = 0;
4924
4925         bp->idle_chk_status_idx = 0xffff;
4926
4927         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4928
4929         /* Set up how to generate a link change interrupt. */
4930         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4931
4932         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4933                (u64) bp->status_blk_mapping & 0xffffffff);
4934         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4935
4936         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4937                (u64) bp->stats_blk_mapping & 0xffffffff);
4938         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4939                (u64) bp->stats_blk_mapping >> 32);
4940
4941         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4942                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4943
4944         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4945                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4946
4947         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4948                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4949
4950         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4951
4952         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4953
4954         REG_WR(bp, BNX2_HC_COM_TICKS,
4955                (bp->com_ticks_int << 16) | bp->com_ticks);
4956
4957         REG_WR(bp, BNX2_HC_CMD_TICKS,
4958                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4959
4960         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4961                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4962         else
4963                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4964         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4965
4966         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4967                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4968         else {
4969                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4970                       BNX2_HC_CONFIG_COLLECT_STATS;
4971         }
4972
4973         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4974                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4975                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4976
4977                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4978         }
4979
4980         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4981                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4982
4983         REG_WR(bp, BNX2_HC_CONFIG, val);
4984
4985         if (bp->rx_ticks < 25)
4986                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4987         else
4988                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4989
4990         for (i = 1; i < bp->irq_nvecs; i++) {
4991                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4992                            BNX2_HC_SB_CONFIG_1;
4993
4994                 REG_WR(bp, base,
4995                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4996                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4997                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4998
4999                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5000                         (bp->tx_quick_cons_trip_int << 16) |
5001                          bp->tx_quick_cons_trip);
5002
5003                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5004                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5005
5006                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5007                        (bp->rx_quick_cons_trip_int << 16) |
5008                         bp->rx_quick_cons_trip);
5009
5010                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5011                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5012         }
5013
5014         /* Clear internal stats counters. */
5015         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5016
5017         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5018
5019         /* Initialize the receive filter. */
5020         bnx2_set_rx_mode(bp->dev);
5021
5022         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5023                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5024                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5025                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5026         }
5027         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5028                           1, 0);
5029
5030         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5031         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5032
5033         udelay(20);
5034
5035         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5036
5037         return rc;
5038 }
5039
5040 static void
5041 bnx2_clear_ring_states(struct bnx2 *bp)
5042 {
5043         struct bnx2_napi *bnapi;
5044         struct bnx2_tx_ring_info *txr;
5045         struct bnx2_rx_ring_info *rxr;
5046         int i;
5047
5048         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5049                 bnapi = &bp->bnx2_napi[i];
5050                 txr = &bnapi->tx_ring;
5051                 rxr = &bnapi->rx_ring;
5052
5053                 txr->tx_cons = 0;
5054                 txr->hw_tx_cons = 0;
5055                 rxr->rx_prod_bseq = 0;
5056                 rxr->rx_prod = 0;
5057                 rxr->rx_cons = 0;
5058                 rxr->rx_pg_prod = 0;
5059                 rxr->rx_pg_cons = 0;
5060         }
5061 }
5062
5063 static void
5064 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5065 {
5066         u32 val, offset0, offset1, offset2, offset3;
5067         u32 cid_addr = GET_CID_ADDR(cid);
5068
5069         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5070                 offset0 = BNX2_L2CTX_TYPE_XI;
5071                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5072                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5073                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5074         } else {
5075                 offset0 = BNX2_L2CTX_TYPE;
5076                 offset1 = BNX2_L2CTX_CMD_TYPE;
5077                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5078                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5079         }
5080         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5081         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5082
5083         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5084         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5085
5086         val = (u64) txr->tx_desc_mapping >> 32;
5087         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5088
5089         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5090         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5091 }
5092
5093 static void
5094 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5095 {
5096         struct tx_bd *txbd;
5097         u32 cid = TX_CID;
5098         struct bnx2_napi *bnapi;
5099         struct bnx2_tx_ring_info *txr;
5100
5101         bnapi = &bp->bnx2_napi[ring_num];
5102         txr = &bnapi->tx_ring;
5103
5104         if (ring_num == 0)
5105                 cid = TX_CID;
5106         else
5107                 cid = TX_TSS_CID + ring_num - 1;
5108
5109         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5110
5111         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5112
5113         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5114         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5115
5116         txr->tx_prod = 0;
5117         txr->tx_prod_bseq = 0;
5118
5119         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5120         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5121
5122         bnx2_init_tx_context(bp, cid, txr);
5123 }
5124
5125 static void
5126 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5127                      int num_rings)
5128 {
5129         int i;
5130         struct rx_bd *rxbd;
5131
5132         for (i = 0; i < num_rings; i++) {
5133                 int j;
5134
5135                 rxbd = &rx_ring[i][0];
5136                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5137                         rxbd->rx_bd_len = buf_size;
5138                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5139                 }
5140                 if (i == (num_rings - 1))
5141                         j = 0;
5142                 else
5143                         j = i + 1;
5144                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5145                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5146         }
5147 }
5148
/* Set up RX ring @ring_num: build the descriptor pages, program the ring's
 * L2 context (including the optional page/jumbo ring), pre-fill the rings
 * with buffers, and publish the initial producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default RX CID; extra RSS rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default to no page ring; overwritten below when one is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Base address of the first page-ring descriptor page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Base address of the first normal RX descriptor page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is logged but not fatal. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with skbs the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for publishing producer indices/bseq. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip how many buffers are available. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5234
/* Program every TX and RX ring into the chip.  With multiple TX rings the
 * TSS configuration is written; with multiple RX rings the RSS indirection
 * table and hash configuration are set up as well.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Clear the TSS configuration while the TX rings are rebuilt. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* With TSS, program the number of extra rings and their base CID. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table: entries cycle through
		 * values 0 .. num_rx_rings-2, packed as eight 4-bit entries
		 * per 32-bit word and written one word at a time.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for all IPv4 and IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5281
5282 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5283 {
5284         u32 max, num_rings = 1;
5285
5286         while (ring_size > MAX_RX_DESC_CNT) {
5287                 ring_size -= MAX_RX_DESC_CNT;
5288                 num_rings++;
5289         }
5290         /* round to next power of 2 */
5291         max = max_size;
5292         while ((max & num_rings) == 0)
5293                 max >>= 1;
5294
5295         if (num_rings != max)
5296                 max <<= 1;
5297
5298         return max;
5299 }
5300
5301 static void
5302 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5303 {
5304         u32 rx_size, rx_space, jumbo_size;
5305
5306         /* 8 for CRC and VLAN */
5307         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5308
5309         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5310                 sizeof(struct skb_shared_info);
5311
5312         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5313         bp->rx_pg_ring_size = 0;
5314         bp->rx_max_pg_ring = 0;
5315         bp->rx_max_pg_ring_idx = 0;
5316         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5317                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5318
5319                 jumbo_size = size * pages;
5320                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5321                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5322
5323                 bp->rx_pg_ring_size = jumbo_size;
5324                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5325                                                         MAX_RX_PG_RINGS);
5326                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5327                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5328                 bp->rx_copy_thresh = 0;
5329         }
5330
5331         bp->rx_buf_use_size = rx_size;
5332         /* hw alignment */
5333         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5334         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5335         bp->rx_ring_size = size;
5336         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5337         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5338 }
5339
/* Unmap and free every skb still held in the TX rings.  Used when tearing
 * the rings down (called from bnx2_free_skbs() after a chip reset).
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the body: one slot for the head buffer
		 * plus one per fragment, or one for an empty slot.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* The head buffer was mapped with dma_map_single. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Fragments occupy the following descriptor slots
			 * and were mapped page-wise.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5383
/* Unmap and free every skb and page still held in the RX rings. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): unlike bnx2_free_tx_skbs() this bails out of
		 * the whole loop instead of skipping the ring — presumably
		 * because rings are allocated in order, so a missing ring
		 * implies no later ring exists either; confirm against the
		 * allocation path.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			/* Undo the streaming DMA mapping before freeing. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release any page-ring (jumbo) pages as well. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5417
/* Release all TX and RX buffers; called after bnx2_reset_chip() in
 * bnx2_reset_nic(), i.e. once the chip has been reset.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5424
5425 static int
5426 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5427 {
5428         int rc;
5429
5430         rc = bnx2_reset_chip(bp, reset_code);
5431         bnx2_free_skbs(bp);
5432         if (rc)
5433                 return rc;
5434
5435         if ((rc = bnx2_init_chip(bp)) != 0)
5436                 return rc;
5437
5438         bnx2_init_all_rings(bp);
5439         return 0;
5440 }
5441
5442 static int
5443 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5444 {
5445         int rc;
5446
5447         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5448                 return rc;
5449
5450         spin_lock_bh(&bp->phy_lock);
5451         bnx2_init_phy(bp, reset_phy);
5452         bnx2_set_link(bp);
5453         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5454                 bnx2_remote_phy_event(bp);
5455         spin_unlock_bh(&bp->phy_lock);
5456         return 0;
5457 }
5458
5459 static int
5460 bnx2_shutdown_chip(struct bnx2 *bp)
5461 {
5462         u32 reset_code;
5463
5464         if (bp->flags & BNX2_FLAG_NO_WOL)
5465                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5466         else if (bp->wol)
5467                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5468         else
5469                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5470
5471         return bnx2_reset_chip(bp, reset_code);
5472 }
5473
/* Ethtool register self-test.  For each table entry: write 0 and then all
 * ones to the register, checking that the read/write bits (rw_mask) take
 * the written value while the read-only bits (ro_mask) keep their original
 * value.  The saved value is restored in all cases.  Returns 0 on success
 * or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: r/w bits must read back 0, r/o bits
		 * must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: r/w bits must read back 1, r/o bits
		 * must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5644
5645 static int
5646 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5647 {
5648         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5649                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5650         int i;
5651
5652         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5653                 u32 offset;
5654
5655                 for (offset = 0; offset < size; offset += 4) {
5656
5657                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5658
5659                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5660                                 test_pattern[i]) {
5661                                 return -ENODEV;
5662                         }
5663                 }
5664         }
5665         return 0;
5666 }
5667
5668 static int
5669 bnx2_test_memory(struct bnx2 *bp)
5670 {
5671         int ret = 0;
5672         int i;
5673         static struct mem_entry {
5674                 u32   offset;
5675                 u32   len;
5676         } mem_tbl_5706[] = {
5677                 { 0x60000,  0x4000 },
5678                 { 0xa0000,  0x3000 },
5679                 { 0xe0000,  0x4000 },
5680                 { 0x120000, 0x4000 },
5681                 { 0x1a0000, 0x4000 },
5682                 { 0x160000, 0x4000 },
5683                 { 0xffffffff, 0    },
5684         },
5685         mem_tbl_5709[] = {
5686                 { 0x60000,  0x4000 },
5687                 { 0xa0000,  0x3000 },
5688                 { 0xe0000,  0x4000 },
5689                 { 0x120000, 0x4000 },
5690                 { 0x1a0000, 0x4000 },
5691                 { 0xffffffff, 0    },
5692         };
5693         struct mem_entry *mem_tbl;
5694
5695         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5696                 mem_tbl = mem_tbl_5709;
5697         else
5698                 mem_tbl = mem_tbl_5706;
5699
5700         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5701                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5702                         mem_tbl[i].len)) != 0) {
5703                         return ret;
5704                 }
5705         }
5706
5707         return ret;
5708 }
5709
5710 #define BNX2_MAC_LOOPBACK       0
5711 #define BNX2_PHY_LOOPBACK       1
5712
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The loopback test always uses the first (default) NAPI
	 * instance for both transmit and receive.
	 */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;

	/* Configure the requested loopback path.  PHY loopback is
	 * skipped (reported as success) when a remote PHY is in use,
	 * since the local driver cannot program it.
	 */
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: destination MAC = our own address,
	 * 8 zero bytes, then an incrementing byte pattern in the
	 * payload (starting at offset 14, past the Ethernet header).
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Kick the host coalescing block (without raising an interrupt)
	 * so the status block is current before we sample the RX
	 * consumer index as our baseline.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand the single test frame to the chip by filling one TX
	 * buffer descriptor and advancing the producer index/bseq.
	 */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the frame time to loop back, then force another status
	 * block update so the TX/RX indices below are up to date.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	/* Inspect the received buffer in place (it is still mapped for
	 * the device, so sync it for CPU access first).
	 */
	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged by the chip fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5844
5845 #define BNX2_MAC_LOOPBACK_FAILED        1
5846 #define BNX2_PHY_LOOPBACK_FAILED        2
5847 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5848                                          BNX2_PHY_LOOPBACK_FAILED)
5849
5850 static int
5851 bnx2_test_loopback(struct bnx2 *bp)
5852 {
5853         int rc = 0;
5854
5855         if (!netif_running(bp->dev))
5856                 return BNX2_LOOPBACK_FAILED;
5857
5858         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5859         spin_lock_bh(&bp->phy_lock);
5860         bnx2_init_phy(bp, 1);
5861         spin_unlock_bh(&bp->phy_lock);
5862         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5863                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5864         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5865                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5866         return rc;
5867 }
5868
5869 #define NVRAM_SIZE 0x200
5870 #define CRC32_RESIDUAL 0xdebb20e3
5871
5872 static int
5873 bnx2_test_nvram(struct bnx2 *bp)
5874 {
5875         __be32 buf[NVRAM_SIZE / 4];
5876         u8 *data = (u8 *) buf;
5877         int rc = 0;
5878         u32 magic, csum;
5879
5880         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5881                 goto test_nvram_done;
5882
5883         magic = be32_to_cpu(buf[0]);
5884         if (magic != 0x669955aa) {
5885                 rc = -ENODEV;
5886                 goto test_nvram_done;
5887         }
5888
5889         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5890                 goto test_nvram_done;
5891
5892         csum = ether_crc_le(0x100, data);
5893         if (csum != CRC32_RESIDUAL) {
5894                 rc = -ENODEV;
5895                 goto test_nvram_done;
5896         }
5897
5898         csum = ether_crc_le(0x100, data + 0x100);
5899         if (csum != CRC32_RESIDUAL) {
5900                 rc = -ENODEV;
5901         }
5902
5903 test_nvram_done:
5904         return rc;
5905 }
5906
5907 static int
5908 bnx2_test_link(struct bnx2 *bp)
5909 {
5910         u32 bmsr;
5911
5912         if (!netif_running(bp->dev))
5913                 return -ENODEV;
5914
5915         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5916                 if (bp->link_up)
5917                         return 0;
5918                 return -ENODEV;
5919         }
5920         spin_lock_bh(&bp->phy_lock);
5921         bnx2_enable_bmsr1(bp);
5922         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5923         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5924         bnx2_disable_bmsr1(bp);
5925         spin_unlock_bh(&bp->phy_lock);
5926
5927         if (bmsr & BMSR_LSTATUS) {
5928                 return 0;
5929         }
5930         return -ENODEV;
5931 }
5932
5933 static int
5934 bnx2_test_intr(struct bnx2 *bp)
5935 {
5936         int i;
5937         u16 status_idx;
5938
5939         if (!netif_running(bp->dev))
5940                 return -ENODEV;
5941
5942         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5943
5944         /* This register is not touched during run-time. */
5945         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5946         REG_RD(bp, BNX2_HC_COMMAND);
5947
5948         for (i = 0; i < 10; i++) {
5949                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5950                         status_idx) {
5951
5952                         break;
5953                 }
5954
5955                 msleep_interruptible(10);
5956         }
5957         if (i < 10)
5958                 return 0;
5959
5960         return -ENODEV;
5961 }
5962
/* Determining link for parallel detection.
 *
 * Probe the 5706 SerDes PHY shadow/expansion registers to decide
 * whether a link partner without autonegotiation is present.
 * Returns 1 if a forced-speed link appears to be up, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection may be disabled for this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it back;
	 * no link without signal detect.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Autoneg debug register: read twice so latched NOSYNC /
	 * RUDI_INVALID bits reflect the current state.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Expansion register 1: read twice (latched); if we are still
	 * receiving CONFIG code words the partner is autonegotiating,
	 * so this is not a parallel-detect link.
	 */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5994
5995 static void
5996 bnx2_5706_serdes_timer(struct bnx2 *bp)
5997 {
5998         int check_link = 1;
5999
6000         spin_lock(&bp->phy_lock);
6001         if (bp->serdes_an_pending) {
6002                 bp->serdes_an_pending--;
6003                 check_link = 0;
6004         } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6005                 u32 bmcr;
6006
6007                 bp->current_interval = BNX2_TIMER_INTERVAL;
6008
6009                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6010
6011                 if (bmcr & BMCR_ANENABLE) {
6012                         if (bnx2_5706_serdes_has_link(bp)) {
6013                                 bmcr &= ~BMCR_ANENABLE;
6014                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6015                                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6016                                 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6017                         }
6018                 }
6019         }
6020         else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6021                  (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6022                 u32 phy2;
6023
6024                 bnx2_write_phy(bp, 0x17, 0x0f01);
6025                 bnx2_read_phy(bp, 0x15, &phy2);
6026                 if (phy2 & 0x20) {
6027                         u32 bmcr;
6028
6029