[BNX2]: Move .h files to bnx2.c
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #include <linux/config.h>
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <asm/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #ifdef NETIF_F_TSO
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #define BCM_TSO 1
47 #endif
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54
/* Driver identification strings, reported in the load banner and ethtool. */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.4.38"
#define DRV_MODULE_RELDATE      "February 10, 2006"

/* Convert a relative delay into an absolute jiffies deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* Banner printed once at module load time. */
static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board types supported by this driver.  Used as an index into
 * board_info[] below and carried in bnx2_pci_tbl[].driver_data.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,         /* HP OEM variants of the BCM5706 */
        NC370I,
        BCM5706S,       /* SerDes (fiber) variant of the 5706 */
        NC370F,         /* HP OEM variant of the BCM5706S */
        BCM5708,
        BCM5708S,       /* SerDes (fiber) variant of the 5708 */
} board_t;
87
/* indexed by board_t, above */
static const struct {
        char *name;     /* marketing name printed at probe time */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        };
100
/* PCI IDs of supported devices.  The HP OEM boards are listed before
 * the generic Broadcom entries so they match on subsystem vendor ID
 * first; the last field selects the board_t/board_info entry.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { 0, }  /* terminator */
};
118
/* Table of known NVRAM devices.  At run time the driver matches the
 * strapping value read from the chip against the first field of each
 * entry.  NOTE(review): the remaining fields appear to be config and
 * command register values followed by buffered-flag, page geometry,
 * byte-address mask, total size and a descriptive name -- confirm
 * against struct flash_spec in bnx2.h.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Atmel Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
207
208 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
209 {
210         u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
211
212         if (diff > MAX_TX_DESC_CNT)
213                 diff = (diff & MAX_TX_DESC_CNT) - 1;
214         return (bp->tx_ring_size - diff);
215 }
216
217 static u32
218 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
219 {
220         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
221         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
222 }
223
/* Indirectly write a device register through the PCI config window:
 * select the target address, then write the value.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
230
231 static void
232 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
233 {
234         offset += cid_addr;
235         REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
236         REG_WR(bp, BNX2_CTX_DATA, val);
237 }
238
239 static int
240 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
241 {
242         u32 val1;
243         int i, ret;
244
245         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
246                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
247                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
248
249                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
250                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
251
252                 udelay(40);
253         }
254
255         val1 = (bp->phy_addr << 21) | (reg << 16) |
256                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
257                 BNX2_EMAC_MDIO_COMM_START_BUSY;
258         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
259
260         for (i = 0; i < 50; i++) {
261                 udelay(10);
262
263                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
264                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
265                         udelay(5);
266
267                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
268                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
269
270                         break;
271                 }
272         }
273
274         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
275                 *val = 0x0;
276                 ret = -EBUSY;
277         }
278         else {
279                 *val = val1;
280                 ret = 0;
281         }
282
283         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
284                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
285                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
286
287                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
288                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
289
290                 udelay(40);
291         }
292
293         return ret;
294 }
295
296 static int
297 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
298 {
299         u32 val1;
300         int i, ret;
301
302         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
303                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
304                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
305
306                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
307                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308
309                 udelay(40);
310         }
311
312         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
313                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
314                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
315         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
316     
317         for (i = 0; i < 50; i++) {
318                 udelay(10);
319
320                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
321                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
322                         udelay(5);
323                         break;
324                 }
325         }
326
327         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
328                 ret = -EBUSY;
329         else
330                 ret = 0;
331
332         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
333                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
334                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
335
336                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
337                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338
339                 udelay(40);
340         }
341
342         return ret;
343 }
344
/* Mask device interrupts.  The read back flushes the posted write so
 * the mask has reached the chip before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
352
353 static void
354 bnx2_enable_int(struct bnx2 *bp)
355 {
356         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
357                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
358                BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
359
360         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
361                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
362
363         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
364 }
365
/* Mask interrupts and wait for any in-flight handler to finish.
 * The atomic_inc pairs with the atomic_dec_and_test in
 * bnx2_netif_start(), which re-enables interrupts.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
373
374 static void
375 bnx2_netif_stop(struct bnx2 *bp)
376 {
377         bnx2_disable_int_sync(bp);
378         if (netif_running(bp->dev)) {
379                 netif_poll_disable(bp->dev);
380                 netif_tx_disable(bp->dev);
381                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
382         }
383 }
384
385 static void
386 bnx2_netif_start(struct bnx2 *bp)
387 {
388         if (atomic_dec_and_test(&bp->intr_sem)) {
389                 if (netif_running(bp->dev)) {
390                         netif_wake_queue(bp->dev);
391                         netif_poll_enable(bp->dev);
392                         bnx2_enable_int(bp);
393                 }
394         }
395 }
396
/* Release all rings and the status/statistics block.  Safe to call on
 * a partially allocated device: every pointer is checked and cleared,
 * so the function is idempotent.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        if (bp->status_blk) {
                /* Status and stats blocks share one DMA allocation. */
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        /* kfree/vfree accept NULL, so no guards are needed. */
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;
}
427
428 static int
429 bnx2_alloc_mem(struct bnx2 *bp)
430 {
431         int i, status_blk_size;
432
433         bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
434                                   GFP_KERNEL);
435         if (bp->tx_buf_ring == NULL)
436                 return -ENOMEM;
437
438         bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
439                                                 sizeof(struct tx_bd) *
440                                                 TX_DESC_CNT,
441                                                 &bp->tx_desc_mapping);
442         if (bp->tx_desc_ring == NULL)
443                 goto alloc_mem_err;
444
445         bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
446                                   bp->rx_max_ring);
447         if (bp->rx_buf_ring == NULL)
448                 goto alloc_mem_err;
449
450         memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
451                                    bp->rx_max_ring);
452
453         for (i = 0; i < bp->rx_max_ring; i++) {
454                 bp->rx_desc_ring[i] =
455                         pci_alloc_consistent(bp->pdev,
456                                              sizeof(struct rx_bd) * RX_DESC_CNT,
457                                              &bp->rx_desc_mapping[i]);
458                 if (bp->rx_desc_ring[i] == NULL)
459                         goto alloc_mem_err;
460
461         }
462
463         /* Combine status and statistics blocks into one allocation. */
464         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
465         bp->status_stats_size = status_blk_size +
466                                 sizeof(struct statistics_block);
467
468         bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
469                                               &bp->status_blk_mapping);
470         if (bp->status_blk == NULL)
471                 goto alloc_mem_err;
472
473         memset(bp->status_blk, 0, bp->status_stats_size);
474
475         bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
476                                   status_blk_size);
477
478         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
479
480         return 0;
481
482 alloc_mem_err:
483         bnx2_free_mem(bp);
484         return -ENOMEM;
485 }
486
487 static void
488 bnx2_report_fw_link(struct bnx2 *bp)
489 {
490         u32 fw_link_status = 0;
491
492         if (bp->link_up) {
493                 u32 bmsr;
494
495                 switch (bp->line_speed) {
496                 case SPEED_10:
497                         if (bp->duplex == DUPLEX_HALF)
498                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
499                         else
500                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
501                         break;
502                 case SPEED_100:
503                         if (bp->duplex == DUPLEX_HALF)
504                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
505                         else
506                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
507                         break;
508                 case SPEED_1000:
509                         if (bp->duplex == DUPLEX_HALF)
510                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
511                         else
512                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
513                         break;
514                 case SPEED_2500:
515                         if (bp->duplex == DUPLEX_HALF)
516                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
517                         else
518                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
519                         break;
520                 }
521
522                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
523
524                 if (bp->autoneg) {
525                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
526
527                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
528                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
529
530                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
531                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
532                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
533                         else
534                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
535                 }
536         }
537         else
538                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
539
540         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
541 }
542
543 static void
544 bnx2_report_link(struct bnx2 *bp)
545 {
546         if (bp->link_up) {
547                 netif_carrier_on(bp->dev);
548                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
549
550                 printk("%d Mbps ", bp->line_speed);
551
552                 if (bp->duplex == DUPLEX_FULL)
553                         printk("full duplex");
554                 else
555                         printk("half duplex");
556
557                 if (bp->flow_ctrl) {
558                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
559                                 printk(", receive ");
560                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
561                                         printk("& transmit ");
562                         }
563                         else {
564                                 printk(", transmit ");
565                         }
566                         printk("flow control ON");
567                 }
568                 printk("\n");
569         }
570         else {
571                 netif_carrier_off(bp->dev);
572                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
573         }
574
575         bnx2_report_fw_link(bp);
576 }
577
578 static void
579 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
580 {
581         u32 local_adv, remote_adv;
582
583         bp->flow_ctrl = 0;
584         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != 
585                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
586
587                 if (bp->duplex == DUPLEX_FULL) {
588                         bp->flow_ctrl = bp->req_flow_ctrl;
589                 }
590                 return;
591         }
592
593         if (bp->duplex != DUPLEX_FULL) {
594                 return;
595         }
596
597         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
598             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
599                 u32 val;
600
601                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
602                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
603                         bp->flow_ctrl |= FLOW_CTRL_TX;
604                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
605                         bp->flow_ctrl |= FLOW_CTRL_RX;
606                 return;
607         }
608
609         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
610         bnx2_read_phy(bp, MII_LPA, &remote_adv);
611
612         if (bp->phy_flags & PHY_SERDES_FLAG) {
613                 u32 new_local_adv = 0;
614                 u32 new_remote_adv = 0;
615
616                 if (local_adv & ADVERTISE_1000XPAUSE)
617                         new_local_adv |= ADVERTISE_PAUSE_CAP;
618                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
619                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
620                 if (remote_adv & ADVERTISE_1000XPAUSE)
621                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
622                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
623                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
624
625                 local_adv = new_local_adv;
626                 remote_adv = new_remote_adv;
627         }
628
629         /* See Table 28B-3 of 802.3ab-1999 spec. */
630         if (local_adv & ADVERTISE_PAUSE_CAP) {
631                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
632                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
633                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
634                         }
635                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
636                                 bp->flow_ctrl = FLOW_CTRL_RX;
637                         }
638                 }
639                 else {
640                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
641                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
642                         }
643                 }
644         }
645         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
646                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
647                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
648
649                         bp->flow_ctrl = FLOW_CTRL_TX;
650                 }
651         }
652 }
653
654 static int
655 bnx2_5708s_linkup(struct bnx2 *bp)
656 {
657         u32 val;
658
659         bp->link_up = 1;
660         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
661         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
662                 case BCM5708S_1000X_STAT1_SPEED_10:
663                         bp->line_speed = SPEED_10;
664                         break;
665                 case BCM5708S_1000X_STAT1_SPEED_100:
666                         bp->line_speed = SPEED_100;
667                         break;
668                 case BCM5708S_1000X_STAT1_SPEED_1G:
669                         bp->line_speed = SPEED_1000;
670                         break;
671                 case BCM5708S_1000X_STAT1_SPEED_2G5:
672                         bp->line_speed = SPEED_2500;
673                         break;
674         }
675         if (val & BCM5708S_1000X_STAT1_FD)
676                 bp->duplex = DUPLEX_FULL;
677         else
678                 bp->duplex = DUPLEX_HALF;
679
680         return 0;
681 }
682
683 static int
684 bnx2_5706s_linkup(struct bnx2 *bp)
685 {
686         u32 bmcr, local_adv, remote_adv, common;
687
688         bp->link_up = 1;
689         bp->line_speed = SPEED_1000;
690
691         bnx2_read_phy(bp, MII_BMCR, &bmcr);
692         if (bmcr & BMCR_FULLDPLX) {
693                 bp->duplex = DUPLEX_FULL;
694         }
695         else {
696                 bp->duplex = DUPLEX_HALF;
697         }
698
699         if (!(bmcr & BMCR_ANENABLE)) {
700                 return 0;
701         }
702
703         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
704         bnx2_read_phy(bp, MII_LPA, &remote_adv);
705
706         common = local_adv & remote_adv;
707         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
708
709                 if (common & ADVERTISE_1000XFULL) {
710                         bp->duplex = DUPLEX_FULL;
711                 }
712                 else {
713                         bp->duplex = DUPLEX_HALF;
714                 }
715         }
716
717         return 0;
718 }
719
720 static int
721 bnx2_copper_linkup(struct bnx2 *bp)
722 {
723         u32 bmcr;
724
725         bnx2_read_phy(bp, MII_BMCR, &bmcr);
726         if (bmcr & BMCR_ANENABLE) {
727                 u32 local_adv, remote_adv, common;
728
729                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
730                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
731
732                 common = local_adv & (remote_adv >> 2);
733                 if (common & ADVERTISE_1000FULL) {
734                         bp->line_speed = SPEED_1000;
735                         bp->duplex = DUPLEX_FULL;
736                 }
737                 else if (common & ADVERTISE_1000HALF) {
738                         bp->line_speed = SPEED_1000;
739                         bp->duplex = DUPLEX_HALF;
740                 }
741                 else {
742                         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
743                         bnx2_read_phy(bp, MII_LPA, &remote_adv);
744
745                         common = local_adv & remote_adv;
746                         if (common & ADVERTISE_100FULL) {
747                                 bp->line_speed = SPEED_100;
748                                 bp->duplex = DUPLEX_FULL;
749                         }
750                         else if (common & ADVERTISE_100HALF) {
751                                 bp->line_speed = SPEED_100;
752                                 bp->duplex = DUPLEX_HALF;
753                         }
754                         else if (common & ADVERTISE_10FULL) {
755                                 bp->line_speed = SPEED_10;
756                                 bp->duplex = DUPLEX_FULL;
757                         }
758                         else if (common & ADVERTISE_10HALF) {
759                                 bp->line_speed = SPEED_10;
760                                 bp->duplex = DUPLEX_HALF;
761                         }
762                         else {
763                                 bp->line_speed = 0;
764                                 bp->link_up = 0;
765                         }
766                 }
767         }
768         else {
769                 if (bmcr & BMCR_SPEED100) {
770                         bp->line_speed = SPEED_100;
771                 }
772                 else {
773                         bp->line_speed = SPEED_10;
774                 }
775                 if (bmcr & BMCR_FULLDPLX) {
776                         bp->duplex = DUPLEX_FULL;
777                 }
778                 else {
779                         bp->duplex = DUPLEX_HALF;
780                 }
781         }
782
783         return 0;
784 }
785
786 static int
787 bnx2_set_mac_link(struct bnx2 *bp)
788 {
789         u32 val;
790
791         REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
792         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
793                 (bp->duplex == DUPLEX_HALF)) {
794                 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
795         }
796
797         /* Configure the EMAC mode register. */
798         val = REG_RD(bp, BNX2_EMAC_MODE);
799
800         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
801                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
802                 BNX2_EMAC_MODE_25G);
803
804         if (bp->link_up) {
805                 switch (bp->line_speed) {
806                         case SPEED_10:
807                                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
808                                         val |= BNX2_EMAC_MODE_PORT_MII_10;
809                                         break;
810                                 }
811                                 /* fall through */
812                         case SPEED_100:
813                                 val |= BNX2_EMAC_MODE_PORT_MII;
814                                 break;
815                         case SPEED_2500:
816                                 val |= BNX2_EMAC_MODE_25G;
817                                 /* fall through */
818                         case SPEED_1000:
819                                 val |= BNX2_EMAC_MODE_PORT_GMII;
820                                 break;
821                 }
822         }
823         else {
824                 val |= BNX2_EMAC_MODE_PORT_GMII;
825         }
826
827         /* Set the MAC to operate in the appropriate duplex mode. */
828         if (bp->duplex == DUPLEX_HALF)
829                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
830         REG_WR(bp, BNX2_EMAC_MODE, val);
831
832         /* Enable/disable rx PAUSE. */
833         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
834
835         if (bp->flow_ctrl & FLOW_CTRL_RX)
836                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
837         REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
838
839         /* Enable/disable tx PAUSE. */
840         val = REG_RD(bp, BNX2_EMAC_TX_MODE);
841         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
842
843         if (bp->flow_ctrl & FLOW_CTRL_TX)
844                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
845         REG_WR(bp, BNX2_EMAC_TX_MODE, val);
846
847         /* Acknowledge the interrupt. */
848         REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
849
850         return 0;
851 }
852
/* Re-evaluate the PHY link state and reprogram the MAC to match.
 *
 * Determines link up/down from the PHY (or from the EMAC status
 * register for 5706 serdes -- see below), invokes the chip-specific
 * link-up handler to resolve speed/duplex, and finishes by calling
 * bnx2_set_mac_link().  Always returns 0.
 * NOTE(review): callers appear to hold bp->phy_lock around the PHY
 * accesses here -- confirm before adding new call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC loopback the link is forced up; nothing to resolve. */
	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remember the previous state so we only report real changes. */
	link_up = bp->link_up;

	/* BMSR link status is latched low; read it twice so the second
	 * read reflects the current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On 5706 serdes, take the link bit from the EMAC status
	 * register instead of the PHY's BMSR. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip/media specific link-up handling resolves the
		 * negotiated speed and duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down.  For autoneg serdes, make sure autoneg
		 * is re-enabled so the link can come back up. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Log the transition only when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	/* Program the MAC for the (possibly new) link parameters. */
	bnx2_set_mac_link(bp);

	return 0;
}
918
919 static int
920 bnx2_reset_phy(struct bnx2 *bp)
921 {
922         int i;
923         u32 reg;
924
925         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
926
927 #define PHY_RESET_MAX_WAIT 100
928         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
929                 udelay(10);
930
931                 bnx2_read_phy(bp, MII_BMCR, &reg);
932                 if (!(reg & BMCR_RESET)) {
933                         udelay(20);
934                         break;
935                 }
936         }
937         if (i == PHY_RESET_MAX_WAIT) {
938                 return -EBUSY;
939         }
940         return 0;
941 }
942
943 static u32
944 bnx2_phy_get_pause_adv(struct bnx2 *bp)
945 {
946         u32 adv = 0;
947
948         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
949                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
950
951                 if (bp->phy_flags & PHY_SERDES_FLAG) {
952                         adv = ADVERTISE_1000XPAUSE;
953                 }
954                 else {
955                         adv = ADVERTISE_PAUSE_CAP;
956                 }
957         }
958         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
959                 if (bp->phy_flags & PHY_SERDES_FLAG) {
960                         adv = ADVERTISE_1000XPSE_ASYM;
961                 }
962                 else {
963                         adv = ADVERTISE_PAUSE_ASYM;
964                 }
965         }
966         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
967                 if (bp->phy_flags & PHY_SERDES_FLAG) {
968                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
969                 }
970                 else {
971                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
972                 }
973         }
974         return adv;
975 }
976
/* Program the serdes PHY according to the requested link settings.
 *
 * Forced-speed mode: disables autoneg, forces 1000 Mbps with the
 * requested duplex, and if anything changed briefly forces the link
 * down so the partner notices the change.
 * Autoneg mode: builds the 1000X advertisement (including pause bits)
 * and restarts autonegotiation when it differs from what is currently
 * advertised.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* 5708: drop out of 2.5G mode when forcing the speed;
		 * this requires a link bounce to take effect. */
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Clear the speed advertisement and
				 * restart autoneg to drop the link. */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiation path.  Enable the 2.5G advertisement first
	 * when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	/* Merge in the pause (flow control) advertisement bits. */
	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			/* Loopback takes the link down; hold it there
			 * (~11 ms) so the partner sees the drop. */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1075
1076 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1077         (ADVERTISED_1000baseT_Full)
1078
1079 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1080         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1081         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1082         ADVERTISED_1000baseT_Full)
1083
1084 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1085         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1086         
1087 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1088
/* Program the copper PHY for the requested link settings.
 *
 * Autoneg path: rebuilds the 10/100 (MII_ADVERTISE) and 1000
 * (MII_CTRL1000) advertisement registers from bp->advertising plus the
 * pause bits, and restarts autonegotiation only when something
 * changed.  Forced path: computes the target BMCR, forces the link
 * down first if it is up (so the partner notices), then writes the new
 * speed/duplex.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits of the current
		 * advertisement for the comparison below. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is currently disabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR is latched; read twice for the current state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1183
1184 static int
1185 bnx2_setup_phy(struct bnx2 *bp)
1186 {
1187         if (bp->loopback == MAC_LOOPBACK)
1188                 return 0;
1189
1190         if (bp->phy_flags & PHY_SERDES_FLAG) {
1191                 return (bnx2_setup_serdes_phy(bp));
1192         }
1193         else {
1194                 return (bnx2_setup_copper_phy(bp));
1195         }
1196 }
1197
/* One-time init for the 5708 serdes PHY.
 *
 * Selects the IEEE register mapping, enables fiber mode with media
 * auto-detect, enables parallel detection, optionally advertises 2.5G,
 * applies a tx amplitude fix on A0/B0/B1 silicon, and programs a tx
 * control value from shared-memory config when the board is a
 * backplane design.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Switch the PHY to IEEE-standard register layout. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Optional tx control override from the NVRAM/shared config. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1251
/* One-time init for the 5706 serdes PHY.
 *
 * Clears parallel-detect state and programs vendor-specific PHY
 * registers 0x18/0x1c differently for jumbo (mtu > 1500) vs standard
 * frames.  The magic values are vendor-supplied; register 0x18
 * appears to carry the extended packet length bit per the original
 * comment -- the rest are undocumented here.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		/* Vendor-specified 5706 workaround value. */
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1287
1288 static int
1289 bnx2_init_copper_phy(struct bnx2 *bp)
1290 {
1291         u32 val;
1292
1293         bp->phy_flags |= PHY_CRC_FIX_FLAG;
1294
1295         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1296                 bnx2_write_phy(bp, 0x18, 0x0c00);
1297                 bnx2_write_phy(bp, 0x17, 0x000a);
1298                 bnx2_write_phy(bp, 0x15, 0x310b);
1299                 bnx2_write_phy(bp, 0x17, 0x201f);
1300                 bnx2_write_phy(bp, 0x15, 0x9506);
1301                 bnx2_write_phy(bp, 0x17, 0x401f);
1302                 bnx2_write_phy(bp, 0x15, 0x14e2);
1303                 bnx2_write_phy(bp, 0x18, 0x0400);
1304         }
1305
1306         if (bp->dev->mtu > 1500) {
1307                 /* Set extended packet length bit */
1308                 bnx2_write_phy(bp, 0x18, 0x7);
1309                 bnx2_read_phy(bp, 0x18, &val);
1310                 bnx2_write_phy(bp, 0x18, val | 0x4000);
1311
1312                 bnx2_read_phy(bp, 0x10, &val);
1313                 bnx2_write_phy(bp, 0x10, val | 0x1);
1314         }
1315         else {
1316                 bnx2_write_phy(bp, 0x18, 0x7);
1317                 bnx2_read_phy(bp, 0x18, &val);
1318                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1319
1320                 bnx2_read_phy(bp, 0x10, &val);
1321                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1322         }
1323
1324         /* ethernet@wirespeed */
1325         bnx2_write_phy(bp, 0x18, 0x7007);
1326         bnx2_read_phy(bp, 0x18, &val);
1327         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1328         return 0;
1329 }
1330
1331
/* Reset and initialize the PHY, then apply the current link settings.
 *
 * Configures the link-ready interrupt mode, enables the EMAC link
 * attention, resets the PHY, captures the 32-bit PHY ID, dispatches to
 * the chip/media specific init routine, and finishes with
 * bnx2_setup_phy().  Returns the media-specific init status; note
 * that errors from bnx2_reset_phy() and bnx2_setup_phy() are not
 * propagated.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* PHY ID: high 16 bits from PHYSID1, low 16 from PHYSID2. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1364
1365 static int
1366 bnx2_set_mac_loopback(struct bnx2 *bp)
1367 {
1368         u32 mac_mode;
1369
1370         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1371         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1372         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1373         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1374         bp->link_up = 1;
1375         return 0;
1376 }
1377
1378 static int bnx2_test_link(struct bnx2 *);
1379
1380 static int
1381 bnx2_set_phy_loopback(struct bnx2 *bp)
1382 {
1383         u32 mac_mode;
1384         int rc, i;
1385
1386         spin_lock_bh(&bp->phy_lock);
1387         rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1388                             BMCR_SPEED1000);
1389         spin_unlock_bh(&bp->phy_lock);
1390         if (rc)
1391                 return rc;
1392
1393         for (i = 0; i < 10; i++) {
1394                 if (bnx2_test_link(bp) == 0)
1395                         break;
1396                 udelay(10);
1397         }
1398
1399         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1400         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1401                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1402                       BNX2_EMAC_MODE_25G);
1403
1404         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1405         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1406         bp->link_up = 1;
1407         return 0;
1408 }
1409
/* Post a message to the bootcode mailbox and wait for the firmware to
 * acknowledge it.
 *
 * A driver sequence number is merged into @msg_data so the firmware's
 * ack (read back from the FW mailbox) can be matched to this request.
 * Returns:
 *   0       - acked with OK status, or the message is a WAIT0 type
 *             (which does not require the full handshake)
 *   -EBUSY  - ack timed out (the firmware is also told of the timeout)
 *   -EIO    - firmware acked with a non-OK status
 * @silent suppresses the timeout error printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next driver sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages return success without the full handshake. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1452
/* Zero out all 96 contexts in the chip's context memory.
 *
 * For each VCID (walked from 95 down to 0) the page-table entry is
 * pointed at the context, PHY_CTX_SIZE bytes are cleared a word at a
 * time through the context window, and the virtual mapping is
 * restored.  On 5706 A0 silicon, VCIDs with bit 3 set are remapped to
 * a different physical CID -- presumably a silicon erratum
 * workaround; the formula mirrors the hardware's internal remap.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* Remap VCIDs with bit 3 set into the 0x60 range. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		/* Restore the virtual address mapping for this CID. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1493
1494 static int
1495 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1496 {
1497         u16 *good_mbuf;
1498         u32 good_mbuf_cnt;
1499         u32 val;
1500
1501         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1502         if (good_mbuf == NULL) {
1503                 printk(KERN_ERR PFX "Failed to allocate memory in "
1504                                     "bnx2_alloc_bad_rbuf\n");
1505                 return -ENOMEM;
1506         }
1507
1508         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1509                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1510
1511         good_mbuf_cnt = 0;
1512
1513         /* Allocate a bunch of mbufs and save the good ones in an array. */
1514         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1515         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1516                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1517
1518                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1519
1520                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1521
1522                 /* The addresses with Bit 9 set are bad memory blocks. */
1523                 if (!(val & (1 << 9))) {
1524                         good_mbuf[good_mbuf_cnt] = (u16) val;
1525                         good_mbuf_cnt++;
1526                 }
1527
1528                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1529         }
1530
1531         /* Free the good ones back to the mbuf pool thus discarding
1532          * all the bad ones. */
1533         while (good_mbuf_cnt) {
1534                 good_mbuf_cnt--;
1535
1536                 val = good_mbuf[good_mbuf_cnt];
1537                 val = (val << 9) | val | 1;
1538
1539                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1540         }
1541         kfree(good_mbuf);
1542         return 0;
1543 }
1544
1545 static void
1546 bnx2_set_mac_addr(struct bnx2 *bp) 
1547 {
1548         u32 val;
1549         u8 *mac_addr = bp->dev->dev_addr;
1550
1551         val = (mac_addr[0] << 8) | mac_addr[1];
1552
1553         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1554
1555         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
1556                 (mac_addr[4] << 8) | mac_addr[5];
1557
1558         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1559 }
1560
/* Allocate and DMA-map a receive skb for ring slot @index.
 *
 * Aligns skb->data to an 8-byte boundary, maps the buffer for
 * device-to-CPU DMA, records the skb and mapping in the shadow ring,
 * and publishes the 64-bit bus address in the hardware rx descriptor.
 * Returns 0 on success or -ENOMEM when no skb can be allocated.
 * NOTE(review): the pci_map_single() result is not checked for a
 * mapping failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data on an 8-byte boundary. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Shadow entry used for the eventual unmap. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Advance the producer byte-sequence counter by one buffer. */
	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1593
1594 static void
1595 bnx2_phy_int(struct bnx2 *bp)
1596 {
1597         u32 new_link_state, old_link_state;
1598
1599         new_link_state = bp->status_blk->status_attn_bits &
1600                 STATUS_ATTN_BITS_LINK_STATE;
1601         old_link_state = bp->status_blk->status_attn_bits_ack &
1602                 STATUS_ATTN_BITS_LINK_STATE;
1603         if (new_link_state != old_link_state) {
1604                 if (new_link_state) {
1605                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1606                                 STATUS_ATTN_BITS_LINK_STATE);
1607                 }
1608                 else {
1609                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1610                                 STATUS_ATTN_BITS_LINK_STATE);
1611                 }
1612                 bnx2_set_link(bp);
1613         }
1614 }
1615
/* Service tx completions: walk from the software consumer index to the
 * hardware consumer index from the status block, unmapping and freeing
 * each completed skb, then wake the tx queue if it was stopped and
 * descriptors are available again.  Uses dev_kfree_skb_irq(), so it
 * runs in interrupt/softirq context.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The index never points at the last entry of a ring page
	 * (used for chaining); step over it to stay in sync. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO 
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's last BD; account for a
			 * crossed ring-page boundary. */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the hardware has not consumed the
			 * packet's last BD yet (signed wrap compare). */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		/* Unmap the linear part of the skb... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* ...then each fragment page. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		/* Re-sample the hardware index; more completions may
		 * have arrived while freeing. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	/* Wake the queue under tx_lock to avoid racing with the xmit
	 * path stopping it at the same time. */
	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
1698
/* Recycle an rx buffer: move the skb (and its DMA mapping) from ring
 * slot @cons to slot @prod instead of allocating a replacement.
 *
 * The sync-for-device hands the header area (rx_offset +
 * RX_COPY_THRESH bytes) back to the device before the buffer is
 * reposted.  When cons == prod the buffer stays in place and only the
 * producer byte-sequence counter needs updating.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	/* Move the DMA mapping and descriptor address over to the
	 * producer slot. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1728
/* NAPI receive handler: service up to @budget completed rx packets.
 *
 * Walks the rx ring from the driver's consumer index to the hardware
 * consumer index published in the status block.  Good frames are passed
 * up the stack; frames with errors, or frames that cannot get a
 * replacement buffer, are recycled in place.  Returns the number of
 * packets delivered so the poll loop can account against its quota.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last slot of each ring page is not a packet descriptor
	 * (same skip as in the tx path), so step the index past it.
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the header area is synced here; the full buffer is
		 * unmapped below if the skb is handed up whole.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip writes an l2_fhdr completion header at the start
		 * of the buffer, ahead of the frame data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* - 4: presumably strips the FCS - confirm vs l2_fhdr layout */

		/* Any rx error recycles the buffer without delivering it. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			/* The original (large) buffer goes back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer allocated: unmap this one fully
			 * and pass it up the stack whole.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement available: recycle the buffer and
			 * drop this packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN ethertype
		 * (0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		/* Report the hardware checksum verdict for TCP/UDP frames
		 * when rx checksum offload is enabled.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		/* Hardware-accelerated VLAN: hand the stripped tag to the
		 * vlan layer when a vlan group is registered.
		 */
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	/* Make sure the mailbox writes are posted before returning. */
	mmiowb();

	return rx_pkt;

}
1879
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the status block cache line while we ack the interrupt. */
	prefetch(bp->status_blk);
	/* Ack and mask further interrupts; they are re-enabled by the
	 * poll routine when all work is done.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the real work to the NAPI poll routine. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1902
/* INTx ISR.  Unlike the MSI handler, the line may be shared, so first
 * decide whether this interrupt is really ours before servicing it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the real work to the NAPI poll routine. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1932
1933 static inline int
1934 bnx2_has_work(struct bnx2 *bp)
1935 {
1936         struct status_block *sblk = bp->status_blk;
1937
1938         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1939             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1940                 return 1;
1941
1942         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1943             bp->link_up)
1944                 return 1;
1945
1946         return 0;
1947 }
1948
/* NAPI poll routine: handle a pending link attention, tx completions,
 * and up to *budget rx packets.  Returns 0 (after completing the poll
 * and re-enabling interrupts) when all work is done, or 1 to be polled
 * again.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between the attention bits and their ack copy means
	 * a link event is pending.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have consumed before re-checking
	 * for work; the barrier keeps the has-work check from being
	 * speculated ahead of this read.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			/* MSI: one write acks up to last_status_idx and
			 * re-enables the interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: written twice, first with MASK_INT still set -
		 * looks like a deliberate hardware workaround; confirm
		 * against chip errata before changing.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2010
2011 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
2012  * from set_multicast.
2013  */
2014 static void
2015 bnx2_set_rx_mode(struct net_device *dev)
2016 {
2017         struct bnx2 *bp = netdev_priv(dev);
2018         u32 rx_mode, sort_mode;
2019         int i;
2020
2021         spin_lock_bh(&bp->phy_lock);
2022
2023         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2024                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2025         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2026 #ifdef BCM_VLAN
2027         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2028                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2029 #else
2030         if (!(bp->flags & ASF_ENABLE_FLAG))
2031                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2032 #endif
2033         if (dev->flags & IFF_PROMISC) {
2034                 /* Promiscuous mode. */
2035                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2036                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2037         }
2038         else if (dev->flags & IFF_ALLMULTI) {
2039                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2040                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2041                                0xffffffff);
2042                 }
2043                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2044         }
2045         else {
2046                 /* Accept one or more multicast(s). */
2047                 struct dev_mc_list *mclist;
2048                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2049                 u32 regidx;
2050                 u32 bit;
2051                 u32 crc;
2052
2053                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2054
2055                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2056                      i++, mclist = mclist->next) {
2057
2058                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2059                         bit = crc & 0xff;
2060                         regidx = (bit & 0xe0) >> 5;
2061                         bit &= 0x1f;
2062                         mc_filter[regidx] |= (1 << bit);
2063                 }
2064
2065                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2066                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2067                                mc_filter[i]);
2068                 }
2069
2070                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2071         }
2072
2073         if (rx_mode != bp->rx_mode) {
2074                 bp->rx_mode = rx_mode;
2075                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2076         }
2077
2078         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2079         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2080         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2081
2082         spin_unlock_bh(&bp->phy_lock);
2083 }
2084
2085 static void
2086 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2087         u32 rv2p_proc)
2088 {
2089         int i;
2090         u32 val;
2091
2092
2093         for (i = 0; i < rv2p_code_len; i += 8) {
2094                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2095                 rv2p_code++;
2096                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2097                 rv2p_code++;
2098
2099                 if (rv2p_proc == RV2P_PROC1) {
2100                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2101                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2102                 }
2103                 else {
2104                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2105                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2106                 }
2107         }
2108
2109         /* Reset the processor, un-stall is done later. */
2110         if (rv2p_proc == RV2P_PROC1) {
2111                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2112         }
2113         else {
2114                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2115         }
2116 }
2117
2118 static void
2119 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2120 {
2121         u32 offset;
2122         u32 val;
2123
2124         /* Halt the CPU. */
2125         val = REG_RD_IND(bp, cpu_reg->mode);
2126         val |= cpu_reg->mode_value_halt;
2127         REG_WR_IND(bp, cpu_reg->mode, val);
2128         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2129
2130         /* Load the Text area. */
2131         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2132         if (fw->text) {
2133                 int j;
2134
2135                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2136                         REG_WR_IND(bp, offset, fw->text[j]);
2137                 }
2138         }
2139
2140         /* Load the Data area. */
2141         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2142         if (fw->data) {
2143                 int j;
2144
2145                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2146                         REG_WR_IND(bp, offset, fw->data[j]);
2147                 }
2148         }
2149
2150         /* Load the SBSS area. */
2151         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2152         if (fw->sbss) {
2153                 int j;
2154
2155                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2156                         REG_WR_IND(bp, offset, fw->sbss[j]);
2157                 }
2158         }
2159
2160         /* Load the BSS area. */
2161         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2162         if (fw->bss) {
2163                 int j;
2164
2165                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2166                         REG_WR_IND(bp, offset, fw->bss[j]);
2167                 }
2168         }
2169
2170         /* Load the Read-Only area. */
2171         offset = cpu_reg->spad_base +
2172                 (fw->rodata_addr - cpu_reg->mips_view_base);
2173         if (fw->rodata) {
2174                 int j;
2175
2176                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2177                         REG_WR_IND(bp, offset, fw->rodata[j]);
2178                 }
2179         }
2180
2181         /* Clear the pre-fetch instruction. */
2182         REG_WR_IND(bp, cpu_reg->inst, 0);
2183         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2184
2185         /* Start the CPU. */
2186         val = REG_RD_IND(bp, cpu_reg->mode);
2187         val &= ~cpu_reg->mode_value_halt;
2188         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2189         REG_WR_IND(bp, cpu_reg->mode, val);
2190 }
2191
/* Load firmware into all of the chip's internal processors: the two
 * RV2P engines plus the RXP, TXP, TPAT and COM MIPS CPUs.  Each CPU
 * section below fills in the same cpu_reg/fw_info templates with that
 * processor's register addresses and firmware image (from bnx2_fw.h)
 * and then hands them to load_cpu_fw().
 */
static void
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
	load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_RXP_b06FwText;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TXP_b06FwText;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TPAT_b06FwText;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_COM_b06FwText;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

}
2387
/* Transition the device between PCI power states.
 *
 * D0: bring the chip out of low power and disable the magic/ACPI
 * wake-up logic.  D3hot: optionally arm Wake-on-LAN (forcing the PHY to
 * 10/100, enabling all-multicast reception and the WoL receive paths),
 * notify the firmware, and drop the chip into D3.  Only PCI_D0 and
 * PCI_D3hot are supported; anything else returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power state field and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packets and disable magic
		 * packet mode now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the link at 10/100 for
			 * wake-up, then restore the saved settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Disable, program, then enable the sort engine
			 * for broadcast + multicast wake-up frames.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (unless WoL is
		 * entirely unsupported on this board).
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* Set the PMCSR power state to D3hot (value 3); on 5706
		 * A0/A1 this is only done when WoL is armed - presumably
		 * a chip-revision quirk, confirm against errata.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2514
2515 static int
2516 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2517 {
2518         u32 val;
2519         int j;
2520
2521         /* Request access to the flash interface. */
2522         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2523         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2524                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2525                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2526                         break;
2527
2528                 udelay(5);
2529         }
2530
2531         if (j >= NVRAM_TIMEOUT_COUNT)
2532                 return -EBUSY;
2533
2534         return 0;
2535 }
2536
2537 static int
2538 bnx2_release_nvram_lock(struct bnx2 *bp)
2539 {
2540         int j;
2541         u32 val;
2542
2543         /* Relinquish nvram interface. */
2544         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2545
2546         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2547                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2548                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2549                         break;
2550
2551                 udelay(5);
2552         }
2553
2554         if (j >= NVRAM_TIMEOUT_COUNT)
2555                 return -EBUSY;
2556
2557         return 0;
2558 }
2559
2560
2561 static int
2562 bnx2_enable_nvram_write(struct bnx2 *bp)
2563 {
2564         u32 val;
2565
2566         val = REG_RD(bp, BNX2_MISC_CFG);
2567         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2568
2569         if (!bp->flash_info->buffered) {
2570                 int j;
2571
2572                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2573                 REG_WR(bp, BNX2_NVM_COMMAND,
2574                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2575
2576                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2577                         udelay(5);
2578
2579                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2580                         if (val & BNX2_NVM_COMMAND_DONE)
2581                                 break;
2582                 }
2583
2584                 if (j >= NVRAM_TIMEOUT_COUNT)
2585                         return -EBUSY;
2586         }
2587         return 0;
2588 }
2589
2590 static void
2591 bnx2_disable_nvram_write(struct bnx2 *bp)
2592 {
2593         u32 val;
2594
2595         val = REG_RD(bp, BNX2_MISC_CFG);
2596         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2597 }
2598
2599
2600 static void
2601 bnx2_enable_nvram_access(struct bnx2 *bp)
2602 {
2603         u32 val;
2604
2605         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2606         /* Enable both bits, even on read. */
2607         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2608                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2609 }
2610
2611 static void
2612 bnx2_disable_nvram_access(struct bnx2 *bp)
2613 {
2614         u32 val;
2615
2616         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2617         /* Disable both bits, even after read. */
2618         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2619                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2620                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2621 }
2622
2623 static int
2624 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2625 {
2626         u32 cmd;
2627         int j;
2628
2629         if (bp->flash_info->buffered)
2630                 /* Buffered flash, no erase needed */
2631                 return 0;
2632
2633         /* Build an erase command */
2634         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2635               BNX2_NVM_COMMAND_DOIT;
2636
2637         /* Need to clear DONE bit separately. */
2638         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2639
2640         /* Address of the NVRAM to read from. */
2641         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2642
2643         /* Issue an erase command. */
2644         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2645
2646         /* Wait for completion. */
2647         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2648                 u32 val;
2649
2650                 udelay(5);
2651
2652                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2653                 if (val & BNX2_NVM_COMMAND_DONE)
2654                         break;
2655         }
2656
2657         if (j >= NVRAM_TIMEOUT_COUNT)
2658                 return -EBUSY;
2659
2660         return 0;
2661 }
2662
2663 static int
2664 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2665 {
2666         u32 cmd;
2667         int j;
2668
2669         /* Build the command word. */
2670         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2671
2672         /* Calculate an offset of a buffered flash. */
2673         if (bp->flash_info->buffered) {
2674                 offset = ((offset / bp->flash_info->page_size) <<
2675                            bp->flash_info->page_bits) +
2676                           (offset % bp->flash_info->page_size);
2677         }
2678
2679         /* Need to clear DONE bit separately. */
2680         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2681
2682         /* Address of the NVRAM to read from. */
2683         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2684
2685         /* Issue a read command. */
2686         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2687
2688         /* Wait for completion. */
2689         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2690                 u32 val;
2691
2692                 udelay(5);
2693
2694                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2695                 if (val & BNX2_NVM_COMMAND_DONE) {
2696                         val = REG_RD(bp, BNX2_NVM_READ);
2697
2698                         val = be32_to_cpu(val);
2699                         memcpy(ret_val, &val, 4);
2700                         break;
2701                 }
2702         }
2703         if (j >= NVRAM_TIMEOUT_COUNT)
2704                 return -EBUSY;
2705
2706         return 0;
2707 }
2708
2709
2710 static int
2711 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2712 {
2713         u32 cmd, val32;
2714         int j;
2715
2716         /* Build the command word. */
2717         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2718
2719         /* Calculate an offset of a buffered flash. */
2720         if (bp->flash_info->buffered) {
2721                 offset = ((offset / bp->flash_info->page_size) <<
2722                           bp->flash_info->page_bits) +
2723                          (offset % bp->flash_info->page_size);
2724         }
2725
2726         /* Need to clear DONE bit separately. */
2727         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2728
2729         memcpy(&val32, val, 4);
2730         val32 = cpu_to_be32(val32);
2731
2732         /* Write the data. */
2733         REG_WR(bp, BNX2_NVM_WRITE, val32);
2734
2735         /* Address of the NVRAM to write to. */
2736         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2737
2738         /* Issue the write command. */
2739         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2740
2741         /* Wait for completion. */
2742         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2743                 udelay(5);
2744
2745                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2746                         break;
2747         }
2748         if (j >= NVRAM_TIMEOUT_COUNT)
2749                 return -EBUSY;
2750
2751         return 0;
2752 }
2753
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info.  If the NVM interface has not yet been reconfigured
 * by strapping, the matching entry's configuration is programmed into
 * the NVM_CFG registers under the NVRAM arbitration lock.  Also
 * determines bp->flash_size.  Returns 0 on success, -ENODEV when no
 * flash_table entry matches, or an errno from lock acquisition.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match against the config1 strap bits already
			 * programmed into NVM_CFG1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a matching entry: unsupported part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size advertised in shared memory, if non-zero;
	 * otherwise fall back to the table entry's total size. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2831
2832 static int
2833 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2834                 int buf_size)
2835 {
2836         int rc = 0;
2837         u32 cmd_flags, offset32, len32, extra;
2838
2839         if (buf_size == 0)
2840                 return 0;
2841
2842         /* Request access to the flash interface. */
2843         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2844                 return rc;
2845
2846         /* Enable access to flash interface */
2847         bnx2_enable_nvram_access(bp);
2848
2849         len32 = buf_size;
2850         offset32 = offset;
2851         extra = 0;
2852
2853         cmd_flags = 0;
2854
2855         if (offset32 & 3) {
2856                 u8 buf[4];
2857                 u32 pre_len;
2858
2859                 offset32 &= ~3;
2860                 pre_len = 4 - (offset & 3);
2861
2862                 if (pre_len >= len32) {
2863                         pre_len = len32;
2864                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2865                                     BNX2_NVM_COMMAND_LAST;
2866                 }
2867                 else {
2868                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2869                 }
2870
2871                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2872
2873                 if (rc)
2874                         return rc;
2875
2876                 memcpy(ret_buf, buf + (offset & 3), pre_len);
2877
2878                 offset32 += 4;
2879                 ret_buf += pre_len;
2880                 len32 -= pre_len;
2881         }
2882         if (len32 & 3) {
2883                 extra = 4 - (len32 & 3);
2884                 len32 = (len32 + 4) & ~3;
2885         }
2886
2887         if (len32 == 4) {
2888                 u8 buf[4];
2889
2890                 if (cmd_flags)
2891                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2892                 else
2893                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2894                                     BNX2_NVM_COMMAND_LAST;
2895
2896                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2897
2898                 memcpy(ret_buf, buf, 4 - extra);
2899         }
2900         else if (len32 > 0) {
2901                 u8 buf[4];
2902
2903                 /* Read the first word. */
2904                 if (cmd_flags)
2905                         cmd_flags = 0;
2906                 else
2907                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2908
2909                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2910
2911                 /* Advance to the next dword. */
2912                 offset32 += 4;
2913                 ret_buf += 4;
2914                 len32 -= 4;
2915
2916                 while (len32 > 4 && rc == 0) {
2917                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2918
2919                         /* Advance to the next dword. */
2920                         offset32 += 4;
2921                         ret_buf += 4;
2922                         len32 -= 4;
2923                 }
2924
2925                 if (rc)
2926                         return rc;
2927
2928                 cmd_flags = BNX2_NVM_COMMAND_LAST;
2929                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2930
2931                 memcpy(ret_buf, buf, 4 - extra);
2932         }
2933
2934         /* Disable access to flash interface */
2935         bnx2_disable_nvram_access(bp);
2936
2937         bnx2_release_nvram_lock(bp);
2938
2939         return rc;
2940 }
2941
2942 static int
2943 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2944                 int buf_size)
2945 {
2946         u32 written, offset32, len32;
2947         u8 *buf, start[4], end[4];
2948         int rc = 0;
2949         int align_start, align_end;
2950
2951         buf = data_buf;
2952         offset32 = offset;
2953         len32 = buf_size;
2954         align_start = align_end = 0;
2955
2956         if ((align_start = (offset32 & 3))) {
2957                 offset32 &= ~3;
2958                 len32 += align_start;
2959                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2960                         return rc;
2961         }
2962
2963         if (len32 & 3) {
2964                 if ((len32 > 4) || !align_start) {
2965                         align_end = 4 - (len32 & 3);
2966                         len32 += align_end;
2967                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2968                                 end, 4))) {
2969                                 return rc;
2970                         }
2971                 }
2972         }
2973
2974         if (align_start || align_end) {
2975                 buf = kmalloc(len32, GFP_KERNEL);
2976                 if (buf == 0)
2977                         return -ENOMEM;
2978                 if (align_start) {
2979                         memcpy(buf, start, 4);
2980                 }
2981                 if (align_end) {
2982                         memcpy(buf + len32 - 4, end, 4);
2983                 }
2984                 memcpy(buf + align_start, data_buf, buf_size);
2985         }
2986
2987         written = 0;
2988         while ((written < len32) && (rc == 0)) {
2989                 u32 page_start, page_end, data_start, data_end;
2990                 u32 addr, cmd_flags;
2991                 int i;
2992                 u8 flash_buffer[264];
2993
2994                 /* Find the page_start addr */
2995                 page_start = offset32 + written;
2996                 page_start -= (page_start % bp->flash_info->page_size);
2997                 /* Find the page_end addr */
2998                 page_end = page_start + bp->flash_info->page_size;
2999                 /* Find the data_start addr */
3000                 data_start = (written == 0) ? offset32 : page_start;
3001                 /* Find the data_end addr */
3002                 data_end = (page_end > offset32 + len32) ? 
3003                         (offset32 + len32) : page_end;
3004
3005                 /* Request access to the flash interface. */
3006                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3007                         goto nvram_write_end;
3008
3009                 /* Enable access to flash interface */
3010                 bnx2_enable_nvram_access(bp);
3011
3012                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3013                 if (bp->flash_info->buffered == 0) {
3014                         int j;
3015
3016                         /* Read the whole page into the buffer
3017                          * (non-buffer flash only) */
3018                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3019                                 if (j == (bp->flash_info->page_size - 4)) {
3020                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3021                                 }
3022                                 rc = bnx2_nvram_read_dword(bp,
3023                                         page_start + j, 
3024                                         &flash_buffer[j], 
3025                                         cmd_flags);
3026
3027                                 if (rc)
3028                                         goto nvram_write_end;
3029
3030                                 cmd_flags = 0;
3031                         }
3032                 }
3033
3034                 /* Enable writes to flash interface (unlock write-protect) */
3035                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3036                         goto nvram_write_end;
3037
3038                 /* Erase the page */
3039                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3040                         goto nvram_write_end;
3041
3042                 /* Re-enable the write again for the actual write */
3043                 bnx2_enable_nvram_write(bp);
3044
3045                 /* Loop to write back the buffer data from page_start to
3046                  * data_start */
3047                 i = 0;
3048                 if (bp->flash_info->buffered == 0) {
3049                         for (addr = page_start; addr < data_start;
3050                                 addr += 4, i += 4) {
3051                                 
3052                                 rc = bnx2_nvram_write_dword(bp, addr,
3053                                         &flash_buffer[i], cmd_flags);
3054
3055                                 if (rc != 0)
3056                                         goto nvram_write_end;
3057
3058                                 cmd_flags = 0;
3059                         }
3060                 }
3061
3062                 /* Loop to write the new data from data_start to data_end */
3063                 for (addr = data_start; addr < data_end; addr += 4, i++) {
3064                         if ((addr == page_end - 4) ||
3065                                 ((bp->flash_info->buffered) &&
3066                                  (addr == data_end - 4))) {
3067
3068                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3069                         }
3070                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3071                                 cmd_flags);
3072
3073                         if (rc != 0)
3074                                 goto nvram_write_end;
3075
3076                         cmd_flags = 0;
3077                         buf += 4;
3078                 }
3079
3080                 /* Loop to write back the buffer data from data_end
3081                  * to page_end */
3082                 if (bp->flash_info->buffered == 0) {
3083                         for (addr = data_end; addr < page_end;
3084                                 addr += 4, i += 4) {
3085                         
3086                                 if (addr == page_end-4) {
3087                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3088                                 }
3089                                 rc = bnx2_nvram_write_dword(bp, addr,
3090                                         &flash_buffer[i], cmd_flags);
3091
3092                                 if (rc != 0)
3093                                         goto nvram_write_end;
3094
3095                                 cmd_flags = 0;
3096                         }
3097                 }
3098
3099                 /* Disable writes to flash interface (lock write-protect) */
3100                 bnx2_disable_nvram_write(bp);
3101
3102                 /* Disable access to flash interface */
3103                 bnx2_disable_nvram_access(bp);
3104                 bnx2_release_nvram_lock(bp);
3105
3106                 /* Increment written */
3107                 written += data_end - data_start;
3108         }
3109
3110 nvram_write_end:
3111         if (align_start || align_end)
3112                 kfree(buf);
3113         return rc;
3114 }
3115
/* Soft-reset the chip core and wait for the on-chip firmware to finish
 * its initialization.  @reset_code is the BNX2_DRV_MSG_CODE_* reason
 * passed to the firmware via bnx2_fw_sync().  Returns 0 on success,
 * -EBUSY if the reset never completes, -ENODEV on a byte-swap
 * misconfiguration, or an errno from firmware synchronization.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read-back (value unused) -- presumably to flush the posted
	 * write before delaying; confirm against chip documentation. */
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* NOTE(review): extra settle time on early 5706 steppings before
	 * polling -- looks like an A0/A1 erratum workaround; confirm. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Both the request and busy bits must have cleared. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3194
/* Bring up the chip after reset: DMA configuration, internal CPUs and
 * NVRAM, MAC address, context memory, host coalescing parameters, and
 * the receive filter.  Finally enables the chip's functional blocks.
 * Returns 0 or a negative errno from the firmware synchronization.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA config bits -- meaning not
	 * visible here; confirm against the register reference. */
	val |= (0x2 << 20) | (1 << 11);

	/* Additional DMA config bit for PCI-X at 133 MHz. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the PCI-X relaxed-ordering enable bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	bnx2_init_cpus(bp);
	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Set the kernel bypass block size to 256 bytes. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell the RV2P processor the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff timer from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Give the host coalescing block the DMA addresses of the status
	 * and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds: each register packs the
	 * during-interrupt value in the high half and the normal value
	 * in the low half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* NOTE(review): 5706 A1 gets stats collection only, without the
	 * RX/TX timer modes -- presumably a stepping limitation; confirm. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Record whether the firmware reports ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining functional blocks and flush the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3354
3355
/* Initialize the TX BD ring and program the TX connection context.
 * Must be called with the chip quiesced (after reset, before enabling
 * the TX path).
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* The last BD of the page is a chain BD; point it back at the
	 * start of the ring so the hardware sees a circular chain.
	 */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset software producer/consumer state. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Program the TX connection context: L2 type and size ... */
	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	/* ... command type (NOTE(review): 8 << 16 is presumably the BD
	 * pre-fetch count in bits 23:16 — confirm against chip docs) ...
	 */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* ... and the 64-bit DMA address of the BD page, split hi/lo. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3386
/* Initialize the RX BD ring pages, program the RX connection context,
 * and pre-fill the ring with receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	/* Reset software producer/consumer state. */
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* For each BD page: the first MAX_RX_DESC_CNT entries are normal
	 * buffer BDs; the final entry is written below as a chain BD
	 * pointing at the next page (the last page chains back to page 0,
	 * making the ring circular).
	 */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* rxbd now points at the last BD of page i; chain it to
		 * the DMA address of page i + 1 (or page 0 at the end).
		 */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX connection context: BD-chain context type plus
	 * flags (NOTE(review): the meaning of 0x02 << 8 is not visible
	 * here — confirm against chip documentation).
	 */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Give the chip the 64-bit DMA address of BD page 0, split hi/lo. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with skbs; stop early on allocation failure
	 * and publish only what was actually allocated.
	 */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Ring the RX doorbell: host producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3446
3447 static void
3448 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3449 {
3450         u32 num_rings, max;
3451
3452         bp->rx_ring_size = size;
3453         num_rings = 1;
3454         while (size > MAX_RX_DESC_CNT) {
3455                 size -= MAX_RX_DESC_CNT;
3456                 num_rings++;
3457         }
3458         /* round to next power of 2 */
3459         max = MAX_RX_RINGS;
3460         while ((max & num_rings) == 0)
3461                 max >>= 1;
3462
3463         if (num_rings != max)
3464                 max <<= 1;
3465
3466         bp->rx_max_ring = max;
3467         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3468 }
3469
3470 static void
3471 bnx2_free_tx_skbs(struct bnx2 *bp)
3472 {
3473         int i;
3474
3475         if (bp->tx_buf_ring == NULL)
3476                 return;
3477
3478         for (i = 0; i < TX_DESC_CNT; ) {
3479                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3480                 struct sk_buff *skb = tx_buf->skb;
3481                 int j, last;
3482
3483                 if (skb == NULL) {
3484                         i++;
3485                         continue;
3486                 }
3487
3488                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3489                         skb_headlen(skb), PCI_DMA_TODEVICE);
3490
3491                 tx_buf->skb = NULL;
3492
3493                 last = skb_shinfo(skb)->nr_frags;
3494                 for (j = 0; j < last; j++) {
3495                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3496                         pci_unmap_page(bp->pdev,
3497                                 pci_unmap_addr(tx_buf, mapping),
3498                                 skb_shinfo(skb)->frags[j].size,
3499                                 PCI_DMA_TODEVICE);
3500                 }
3501                 dev_kfree_skb_any(skb);
3502                 i += j + 1;
3503         }
3504
3505 }
3506
3507 static void
3508 bnx2_free_rx_skbs(struct bnx2 *bp)
3509 {
3510         int i;
3511
3512         if (bp->rx_buf_ring == NULL)
3513                 return;
3514
3515         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3516                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3517                 struct sk_buff *skb = rx_buf->skb;
3518
3519                 if (skb == NULL)
3520                         continue;
3521
3522                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3523                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3524
3525                 rx_buf->skb = NULL;
3526
3527                 dev_kfree_skb_any(skb);
3528         }
3529 }
3530
/* Release all TX and RX skbs and their DMA mappings, e.g. before a
 * chip reset or on device close.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3537
3538 static int
3539 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3540 {
3541         int rc;
3542
3543         rc = bnx2_reset_chip(bp, reset_code);
3544         bnx2_free_skbs(bp);
3545         if (rc)
3546                 return rc;
3547
3548         bnx2_init_chip(bp);
3549         bnx2_init_tx_ring(bp);
3550         bnx2_init_rx_ring(bp);
3551         return 0;
3552 }
3553
3554 static int
3555 bnx2_init_nic(struct bnx2 *bp)
3556 {
3557         int rc;
3558
3559         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3560                 return rc;
3561
3562         bnx2_init_phy(bp);
3563         bnx2_set_link(bp);
3564         return 0;
3565 }
3566
/* Ethtool self-test: walk a table of chip registers, verifying for
 * each one that the bits in rw_mask are writable (store 0 and all-1s,
 * read back) and that the bits in ro_mask are read-only (unchanged by
 * either write). Each register is restored to its original value.
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	/* offset: BAR offset of the register; flags: unused here;
	 * rw_mask: bits expected to read back exactly as written;
	 * ro_mask: bits expected to keep their original value.
	 * Terminated by offset == 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: writable bits must read back 0 and
		 * read-only bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: writable bits must read back 1 and
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value before moving on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore even on failure, then report the device bad. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3729
3730 static int
3731 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3732 {
3733         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3734                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3735         int i;
3736
3737         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3738                 u32 offset;
3739
3740                 for (offset = 0; offset < size; offset += 4) {
3741
3742                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3743
3744                         if (REG_RD_IND(bp, start + offset) !=
3745                                 test_pattern[i]) {
3746                                 return -ENODEV;
3747                         }
3748                 }
3749         }
3750         return 0;
3751 }
3752
3753 static int
3754 bnx2_test_memory(struct bnx2 *bp)
3755 {
3756         int ret = 0;
3757         int i;
3758         static const struct {
3759                 u32   offset;
3760                 u32   len;
3761         } mem_tbl[] = {
3762                 { 0x60000,  0x4000 },
3763                 { 0xa0000,  0x3000 },
3764                 { 0xe0000,  0x4000 },
3765                 { 0x120000, 0x4000 },
3766                 { 0x1a0000, 0x4000 },
3767                 { 0x160000, 0x4000 },
3768                 { 0xffffffff, 0    },
3769         };
3770
3771         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3772                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3773                         mem_tbl[i].len)) != 0) {
3774                         return ret;
3775                 }
3776         }
3777         
3778         return ret;
3779 }
3780
3781 #define BNX2_MAC_LOOPBACK       0
3782 #define BNX2_PHY_LOOPBACK       1
3783
/* Send one test frame through the selected loopback point (MAC or PHY)
 * and verify it comes back intact: correct TX/RX index advance, no
 * receive-error flags, matching length, and byte-exact payload.
 * Returns 0 on success, -EINVAL for a bad mode, -ENOMEM if no skb,
 * -ENODEV on any verification failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	/* Select where the frame is looped back. */
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = 0;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a 1514-byte test frame: our MAC address as destination,
	 * 8 zeroed bytes, then an incrementing byte pattern.
	 */
	pkt_size = 1514;
	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalescing pass (no interrupt) so the status block's
	 * RX index is current before we transmit.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand the frame to the chip with a single TX BD. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell: producer index and byte sequence. */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	/* Give the frame time to loop back, then force another
	 * coalescing pass so the status block reflects the result.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);

	/* The frame must have been fully consumed by the TX engine ... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ... and exactly num_pkts frames must have arrived on RX. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written l2_fhdr precedes the packet data in the
	 * receive buffer; rx_offset skips past it.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length must match, minus the 4-byte frame CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
3902
3903 #define BNX2_MAC_LOOPBACK_FAILED        1
3904 #define BNX2_PHY_LOOPBACK_FAILED        2
3905 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
3906                                          BNX2_PHY_LOOPBACK_FAILED)
3907
3908 static int
3909 bnx2_test_loopback(struct bnx2 *bp)
3910 {
3911         int rc = 0;
3912
3913         if (!netif_running(bp->dev))
3914                 return BNX2_LOOPBACK_FAILED;
3915
3916         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3917         spin_lock_bh(&bp->phy_lock);
3918         bnx2_init_phy(bp);
3919         spin_unlock_bh(&bp->phy_lock);
3920         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3921                 rc |= BNX2_MAC_LOOPBACK_FAILED;
3922         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3923                 rc |= BNX2_PHY_LOOPBACK_FAILED;
3924         return rc;
3925 }
3926
3927 #define NVRAM_SIZE 0x200
3928 #define CRC32_RESIDUAL 0xdebb20e3
3929
3930 static int
3931 bnx2_test_nvram(struct bnx2 *bp)
3932 {
3933         u32 buf[NVRAM_SIZE / 4];
3934         u8 *data = (u8 *) buf;
3935         int rc = 0;
3936         u32 magic, csum;
3937
3938         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3939                 goto test_nvram_done;
3940
3941         magic = be32_to_cpu(buf[0]);
3942         if (magic != 0x669955aa) {
3943                 rc = -ENODEV;
3944                 goto test_nvram_done;
3945         }
3946
3947         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3948                 goto test_nvram_done;
3949
3950         csum = ether_crc_le(0x100, data);
3951         if (csum != CRC32_RESIDUAL) {
3952                 rc = -ENODEV;
3953                 goto test_nvram_done;
3954         }
3955
3956         csum = ether_crc_le(0x100, data + 0x100);
3957         if (csum != CRC32_RESIDUAL) {
3958                 rc = -ENODEV;
3959         }
3960
3961 test_nvram_done:
3962         return rc;
3963 }
3964
3965 static int
3966 bnx2_test_link(struct bnx2 *bp)
3967 {
3968         u32 bmsr;
3969
3970         spin_lock_bh(&bp->phy_lock);
3971         bnx2_read_phy(bp, MII_BMSR, &bmsr);
3972         bnx2_read_phy(bp, MII_BMSR, &bmsr);
3973         spin_unlock_bh(&bp->phy_lock);
3974                 
3975         if (bmsr & BMSR_LSTATUS) {
3976                 return 0;
3977         }
3978         return -ENODEV;
3979 }
3980
3981 static int
3982 bnx2_test_intr(struct bnx2 *bp)
3983 {
3984         int i;
3985         u16 status_idx;
3986
3987         if (!netif_running(bp->dev))
3988                 return -ENODEV;
3989
3990         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3991
3992         /* This register is not touched during run-time. */
3993         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
3994         REG_RD(bp, BNX2_HC_COMMAND);
3995
3996         for (i = 0; i < 10; i++) {
3997                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3998                         status_idx) {
3999
4000                         break;
4001                 }
4002
4003                 msleep_interruptible(10);
4004         }
4005         if (i < 10)
4006                 return 0;
4007
4008         return -ENODEV;
4009 }
4010
/* Periodic driver timer: sends the firmware keep-alive pulse and, on
 * 5706 SerDes boards, runs the parallel-detection link workaround.
 * Re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip this tick while interrupts are gated (intr_sem raised). */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat: bump the driver pulse sequence in shared memory so
	 * the firmware knows the driver is alive.
	 */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	/* 5706 SerDes only: handle link partners that do not
	 * autonegotiate, via parallel detection.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			/* An autoneg attempt is still pending; count down. */
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				/* Probe vendor-specific PHY registers
				 * (0x1c, and 0x15 via the 0x17 shadow
				 * select) for signal-detect/CONFIG state.
				 */
				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					/* Signal present but no autoneg
					 * CONFIG codes from the partner:
					 * force 1000 Mbps full duplex.
					 */
					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			(bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			/* Link came up via parallel detect; if the partner
			 * now sends CONFIG codes, re-enable autoneg and
			 * drop out of parallel-detect mode.
			 */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4089
/* Called with rtnl_lock */
/* net_device open handler: power up the chip, allocate ring memory,
 * acquire the interrupt (MSI when possible, else shared INTx),
 * initialize the NIC, verify MSI delivery, and start the TX queue.
 * Returns 0 on success; unwinds everything acquired on failure.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Try MSI except on 5706 A0/A1 steppings (NOTE(review):
	 * presumably a chip erratum on those revisions — confirm) or
	 * when disabled by module parameter; fall back to shared INTx.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind in reverse order of acquisition. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-initialize the NIC and retry with INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4185
/* Workqueue handler (scheduled from bnx2_tx_timeout) that stops the
 * interface, fully re-initializes the chip, and restarts it.
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	/* in_reset_task marks that a reset is in progress (NOTE(review):
	 * presumably checked elsewhere to avoid re-entry — confirm).
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4203
/* net_device TX watchdog handler: defer the chip reset to process
 * context via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4212
4213 #ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Attach the VLAN group and reprogram the RX filters accordingly;
 * the data path is stopped/restarted around the update.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4227
/* Called with rtnl_lock */
/* Remove one VLAN id from the group and reprogram the RX filters;
 * the data path is stopped/restarted around the update.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4242 #endif
4243
4244 /* Called with dev->xmit_lock.
4245  * hard_start_xmit is pseudo-lockless - a lock is only required when
4246  * the tx queue is full. This way, we get the benefit of lockless
4247  * operations most of the time without the complexities to handle
4248  * netif_stop_queue/wake_queue race conditions.
4249  */
4250 static int
4251 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4252 {
4253         struct bnx2 *bp = netdev_priv(dev);
4254         dma_addr_t mapping;
4255         struct tx_bd *txbd;
4256         struct sw_bd *tx_buf;
4257         u32 len, vlan_tag_flags, last_frag, mss;
4258         u16 prod, ring_prod;
4259         int i;
4260
4261         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4262                 netif_stop_queue(dev);
4263                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4264                         dev->name);
4265
4266                 return NETDEV_TX_BUSY;
4267         }
4268         len = skb_headlen(skb);
4269         prod = bp->tx_prod;
4270         ring_prod = TX_RING_IDX(prod);
4271
4272         vlan_tag_flags = 0;
4273         if (skb->ip_summed == CHECKSUM_HW) {
4274                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4275         }
4276
4277         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4278                 vlan_tag_flags |=
4279                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4280         }
4281 #ifdef BCM_TSO 
4282         if ((mss = skb_shinfo(skb)->tso_size) &&
4283                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4284                 u32 tcp_opt_len, ip_tcp_len;
4285
4286                 if (skb_header_cloned(skb) &&
4287                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4288                         dev_kfree_skb(skb);
4289                         return NETDEV_TX_OK;
4290                 }
4291
4292                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4293                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4294
4295                 tcp_opt_len = 0;
4296                 if (skb->h.th->doff > 5) {
4297                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4298                 }
4299                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4300
4301                 skb->nh.iph->check = 0;
4302                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4303                 skb->h.th->check =
4304                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4305                                             skb->nh.iph->daddr,
4306                                             0, IPPROTO_TCP, 0);
4307
4308                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4309                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4310                                 (tcp_opt_len >> 2)) << 8;
4311                 }
4312         }
4313         else
4314 #endif
4315         {
4316                 mss = 0;
4317         }
4318
4319         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4320         
4321         tx_buf = &bp->tx_buf_ring[ring_prod];
4322         tx_buf->skb = skb;
4323         pci_unmap_addr_set(tx_buf, mapping, mapping);
4324
4325         txbd = &bp->tx_desc_ring[ring_prod];
4326
4327         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4328         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4329         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4330         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4331
4332         last_frag = skb_shinfo(skb)->nr_frags;
4333
4334         for (i = 0; i < last_frag; i++) {
4335                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4336
4337                 prod = NEXT_TX_BD(prod);
4338                 ring_prod = TX_RING_IDX(prod);
4339                 txbd = &bp->tx_desc_ring[ring_prod];
4340
4341                 len = frag->size;
4342                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4343                         len, PCI_DMA_TODEVICE);
4344                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4345                                 mapping, mapping);
4346
4347                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4348                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4349                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4350                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4351
4352         }
4353         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4354
4355         prod = NEXT_TX_BD(prod);
4356         bp->tx_prod_bseq += skb->len;
4357
4358         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4359         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4360
4361         mmiowb();
4362
4363         bp->tx_prod = prod;
4364         dev->trans_start = jiffies;
4365
4366         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4367                 spin_lock(&bp->tx_lock);
4368                 netif_stop_queue(dev);
4369                 
4370                 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4371                         netif_wake_queue(dev);
4372                 spin_unlock(&bp->tx_lock);
4373         }
4374
4375         return NETDEV_TX_OK;
4376 }
4377
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        /* Quiesce the interface and stop the heartbeat timer before
         * telling the firmware to shut the port down.
         */
        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        /* Select the firmware reset code from the Wake-on-LAN policy. */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        free_irq(bp->pdev->irq, dev);
        if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
        /* Release all rx/tx buffers and the ring/status memory. */
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        /* Put the chip into the D3hot low-power state. */
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
4413
/* Assemble a 64-bit hardware counter from its _hi/_lo halves.  Only
 * selected when unsigned long is 64 bits wide (see below).  The
 * expansion is fully parenthesized so the macro behaves like a single
 * expression in any arithmetic context.
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

/* On 32-bit platforms only the low half of the counter is reported. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4426
4427 static struct net_device_stats *
4428 bnx2_get_stats(struct net_device *dev)
4429 {
4430         struct bnx2 *bp = netdev_priv(dev);
4431         struct statistics_block *stats_blk = bp->stats_blk;
4432         struct net_device_stats *net_stats = &bp->net_stats;
4433
4434         if (bp->stats_blk == NULL) {
4435                 return net_stats;
4436         }
4437         net_stats->rx_packets =
4438                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4439                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4440                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4441
4442         net_stats->tx_packets =
4443                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4444                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4445                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4446
4447         net_stats->rx_bytes =
4448                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4449
4450         net_stats->tx_bytes =
4451                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4452
4453         net_stats->multicast = 
4454                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4455
4456         net_stats->collisions = 
4457                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4458
4459         net_stats->rx_length_errors = 
4460                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4461                 stats_blk->stat_EtherStatsOverrsizePkts);
4462
4463         net_stats->rx_over_errors = 
4464                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4465
4466         net_stats->rx_frame_errors = 
4467                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4468
4469         net_stats->rx_crc_errors = 
4470                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4471
4472         net_stats->rx_errors = net_stats->rx_length_errors +
4473                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4474                 net_stats->rx_crc_errors;
4475
4476         net_stats->tx_aborted_errors =
4477                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4478                 stats_blk->stat_Dot3StatsLateCollisions);
4479
4480         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4481             (CHIP_ID(bp) == CHIP_ID_5708_A0))
4482                 net_stats->tx_carrier_errors = 0;
4483         else {
4484                 net_stats->tx_carrier_errors =
4485                         (unsigned long)
4486                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
4487         }
4488
4489         net_stats->tx_errors =
4490                 (unsigned long) 
4491                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4492                 +
4493                 net_stats->tx_aborted_errors +
4494                 net_stats->tx_carrier_errors;
4495
4496         return net_stats;
4497 }
4498
4499 /* All ethtool functions called with rtnl_lock */
4500
4501 static int
4502 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4503 {
4504         struct bnx2 *bp = netdev_priv(dev);
4505
4506         cmd->supported = SUPPORTED_Autoneg;
4507         if (bp->phy_flags & PHY_SERDES_FLAG) {
4508                 cmd->supported |= SUPPORTED_1000baseT_Full |
4509                         SUPPORTED_FIBRE;
4510
4511                 cmd->port = PORT_FIBRE;
4512         }
4513         else {
4514                 cmd->supported |= SUPPORTED_10baseT_Half |
4515                         SUPPORTED_10baseT_Full |
4516                         SUPPORTED_100baseT_Half |
4517                         SUPPORTED_100baseT_Full |
4518                         SUPPORTED_1000baseT_Full |
4519                         SUPPORTED_TP;
4520
4521                 cmd->port = PORT_TP;
4522         }
4523
4524         cmd->advertising = bp->advertising;
4525
4526         if (bp->autoneg & AUTONEG_SPEED) {
4527                 cmd->autoneg = AUTONEG_ENABLE;
4528         }
4529         else {
4530                 cmd->autoneg = AUTONEG_DISABLE;
4531         }
4532
4533         if (netif_carrier_ok(dev)) {
4534                 cmd->speed = bp->line_speed;
4535                 cmd->duplex = bp->duplex;
4536         }
4537         else {
4538                 cmd->speed = -1;
4539                 cmd->duplex = -1;
4540         }
4541
4542         cmd->transceiver = XCVR_INTERNAL;
4543         cmd->phy_address = bp->phy_addr;
4544
4545         return 0;
4546 }
4547   
/* ethtool set_settings: validate and apply new link parameters.
 * Changes are staged in locals first so nothing is modified if a
 * validation check fails.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are rejected on serdes (fiber). */
                        if (bp->phy_flags & PHY_SERDES_FLAG)
                                return -EINVAL;

                        advertising = cmd->advertising;

                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
                        advertising = cmd->advertising;
                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
                        /* 1000 half duplex is not supported. */
                        return -EINVAL;
                }
                else {
                        /* Any other combination: advertise everything
                         * appropriate for the PHY type.
                         */
                        if (bp->phy_flags & PHY_SERDES_FLAG) {
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        }
                        else {
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                        }
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced mode: serdes accepts only 1000/full; copper
                 * accepts anything except forced 1000.
                 */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if ((cmd->speed != SPEED_1000) ||
                                (cmd->duplex != DUPLEX_FULL)) {
                                return -EINVAL;
                        }
                }
                else if (cmd->speed == SPEED_1000) {
                        return -EINVAL;
                }
                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed; commit the new settings and renegotiate. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        spin_lock_bh(&bp->phy_lock);

        bnx2_setup_phy(bp);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4619
4620 static void
4621 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4622 {
4623         struct bnx2 *bp = netdev_priv(dev);
4624
4625         strcpy(info->driver, DRV_MODULE_NAME);
4626         strcpy(info->version, DRV_MODULE_VERSION);
4627         strcpy(info->bus_info, pci_name(bp->pdev));
4628         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4629         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4630         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4631         info->fw_version[1] = info->fw_version[3] = '.';
4632         info->fw_version[5] = 0;
4633 }
4634
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len: report the register dump buffer size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
4642
4643 static void
4644 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4645 {
4646         u32 *p = _p, i, offset;
4647         u8 *orig_p = _p;
4648         struct bnx2 *bp = netdev_priv(dev);
4649         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4650                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4651                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4652                                  0x1040, 0x1048, 0x1080, 0x10a4,
4653                                  0x1400, 0x1490, 0x1498, 0x14f0,
4654                                  0x1500, 0x155c, 0x1580, 0x15dc,
4655                                  0x1600, 0x1658, 0x1680, 0x16d8,
4656                                  0x1800, 0x1820, 0x1840, 0x1854,
4657                                  0x1880, 0x1894, 0x1900, 0x1984,
4658                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4659                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4660                                  0x2000, 0x2030, 0x23c0, 0x2400,
4661                                  0x2800, 0x2820, 0x2830, 0x2850,
4662                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4663                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4664                                  0x4080, 0x4090, 0x43c0, 0x4458,
4665                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4666                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4667                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4668                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4669                                  0x6800, 0x6848, 0x684c, 0x6860,
4670                                  0x6888, 0x6910, 0x8000 };
4671
4672         regs->version = 0;
4673
4674         memset(p, 0, BNX2_REGDUMP_LEN);
4675
4676         if (!netif_running(bp->dev))
4677                 return;
4678
4679         i = 0;
4680         offset = reg_boundaries[0];
4681         p += offset;
4682         while (offset < BNX2_REGDUMP_LEN) {
4683                 *p++ = REG_RD(bp, offset);
4684                 offset += 4;
4685                 if (offset == reg_boundaries[i + 1]) {
4686                         offset = reg_boundaries[i + 2];
4687                         p = (u32 *) (orig_p + offset);
4688                         i += 2;
4689                 }
4690         }
4691 }
4692
4693 static void
4694 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4695 {
4696         struct bnx2 *bp = netdev_priv(dev);
4697
4698         if (bp->flags & NO_WOL_FLAG) {
4699                 wol->supported = 0;
4700                 wol->wolopts = 0;
4701         }
4702         else {
4703                 wol->supported = WAKE_MAGIC;
4704                 if (bp->wol)
4705                         wol->wolopts = WAKE_MAGIC;
4706                 else
4707                         wol->wolopts = 0;
4708         }
4709         memset(&wol->sopass, 0, sizeof(wol->sopass));
4710 }
4711
4712 static int
4713 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4714 {
4715         struct bnx2 *bp = netdev_priv(dev);
4716
4717         if (wol->wolopts & ~WAKE_MAGIC)
4718                 return -EINVAL;
4719
4720         if (wol->wolopts & WAKE_MAGIC) {
4721                 if (bp->flags & NO_WOL_FLAG)
4722                         return -EINVAL;
4723
4724                 bp->wol = 1;
4725         }
4726         else {
4727                 bp->wol = 0;
4728         }
4729         return 0;
4730 }
4731
/* ethtool nway_reset: restart link autonegotiation. */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        /* Renegotiation only makes sense when autoneg is enabled. */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                /* Drop the lock so the link can stay down for 20ms
                 * without blocking other phy accesses.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);
                if (CHIP_NUM(bp) == CHIP_NUM_5706) {
                        /* Arm the serdes autoneg timeout handling in the
                         * driver timer.
                         */
                        bp->current_interval = SERDES_AN_TIMEOUT;
                        bp->serdes_an_pending = 1;
                        mod_timer(&bp->timer, jiffies + bp->current_interval);
                }
        }

        /* Clear loopback and kick off a new autonegotiation cycle. */
        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4767
4768 static int
4769 bnx2_get_eeprom_len(struct net_device *dev)
4770 {
4771         struct bnx2 *bp = netdev_priv(dev);
4772
4773         if (bp->flash_info == NULL)
4774                 return 0;
4775
4776         return (int) bp->flash_size;
4777 }
4778
4779 static int
4780 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4781                 u8 *eebuf)
4782 {
4783         struct bnx2 *bp = netdev_priv(dev);
4784         int rc;
4785
4786         /* parameters already validated in ethtool_get_eeprom */
4787
4788         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4789
4790         return rc;
4791 }
4792
4793 static int
4794 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4795                 u8 *eebuf)
4796 {
4797         struct bnx2 *bp = netdev_priv(dev);
4798         int rc;
4799
4800         /* parameters already validated in ethtool_set_eeprom */
4801
4802         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4803
4804         return rc;
4805 }
4806
4807 static int
4808 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4809 {
4810         struct bnx2 *bp = netdev_priv(dev);
4811
4812         memset(coal, 0, sizeof(struct ethtool_coalesce));
4813
4814         coal->rx_coalesce_usecs = bp->rx_ticks;
4815         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4816         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4817         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4818
4819         coal->tx_coalesce_usecs = bp->tx_ticks;
4820         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4821         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4822         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4823
4824         coal->stats_block_coalesce_usecs = bp->stats_ticks;
4825
4826         return 0;
4827 }
4828
4829 static int
4830 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4831 {
4832         struct bnx2 *bp = netdev_priv(dev);
4833
4834         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4835         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4836
4837         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; 
4838         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4839
4840         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4841         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4842
4843         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4844         if (bp->rx_quick_cons_trip_int > 0xff)
4845                 bp->rx_quick_cons_trip_int = 0xff;
4846
4847         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4848         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4849
4850         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4851         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4852
4853         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4854         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4855
4856         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4857         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4858                 0xff;
4859
4860         bp->stats_ticks = coal->stats_block_coalesce_usecs;
4861         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4862         bp->stats_ticks &= 0xffff00;
4863
4864         if (netif_running(bp->dev)) {
4865                 bnx2_netif_stop(bp);
4866                 bnx2_init_nic(bp);
4867                 bnx2_netif_start(bp);
4868         }
4869
4870         return 0;
4871 }
4872
4873 static void
4874 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4875 {
4876         struct bnx2 *bp = netdev_priv(dev);
4877
4878         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4879         ering->rx_mini_max_pending = 0;
4880         ering->rx_jumbo_max_pending = 0;
4881
4882         ering->rx_pending = bp->rx_ring_size;
4883         ering->rx_mini_pending = 0;
4884         ering->rx_jumbo_pending = 0;
4885
4886         ering->tx_max_pending = MAX_TX_DESC_CNT;
4887         ering->tx_pending = bp->tx_ring_size;
4888 }
4889
/* ethtool set_ringparam: resize the rx and tx rings.  If the device
 * is running it is torn down and re-initialized with the new sizes.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* The tx ring must be strictly larger than MAX_SKB_FRAGS so a
         * maximally fragmented packet can always be queued.
         */
        if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
                (ering->tx_pending > MAX_TX_DESC_CNT) ||
                (ering->tx_pending <= MAX_SKB_FRAGS)) {

                return -EINVAL;
        }
        /* Quiesce, reset the chip, and release the old buffers and
         * ring memory before changing sizes.
         */
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, ering->rx_pending);
        bp->tx_ring_size = ering->tx_pending;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                /* NOTE(review): on allocation failure the device is left
                 * stopped with no rings allocated - confirm the caller
                 * is expected to close the device in this case.
                 */
                if (rc)
                        return rc;
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }

        return 0;
}
4923
4924 static void
4925 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4926 {
4927         struct bnx2 *bp = netdev_priv(dev);
4928
4929         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4930         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4931         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4932 }
4933
4934 static int
4935 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4936 {
4937         struct bnx2 *bp = netdev_priv(dev);
4938
4939         bp->req_flow_ctrl = 0;
4940         if (epause->rx_pause)
4941                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4942         if (epause->tx_pause)
4943                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4944
4945         if (epause->autoneg) {
4946                 bp->autoneg |= AUTONEG_FLOW_CTRL;
4947         }
4948         else {
4949                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4950         }
4951
4952         spin_lock_bh(&bp->phy_lock);
4953
4954         bnx2_setup_phy(bp);
4955
4956         spin_unlock_bh(&bp->phy_lock);
4957
4958         return 0;
4959 }
4960
/* ethtool get_rx_csum: report whether rx checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->rx_csum;
}
4968
/* ethtool set_rx_csum: record the rx checksum offload setting. */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_csum = data;
        return 0;
}
4977
#define BNX2_NUM_STATS 45

/* ethtool statistics names.  The order of this table must match
 * bnx2_stats_offset_arr and the per-chip stats length arrays.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
};
5029
/* Convert a statistics_block field name into its 32-bit word offset. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter in the statistics block, in the same
 * order as bnx2_stats_str_arr.  64-bit counters point at their _hi
 * word; the per-chip length arrays give each counter's width in bytes.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
};
5079
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes on the 5706; 0 marks a skipped counter. */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,
};

/* Same table for the 5708; only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,
};
5098
#define BNX2_NUM_TESTS 6

/* Names reported for ethtool self-test results.  "offline" tests run
 * with traffic stopped; "online" tests run without disturbing traffic.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
5111
/* ethtool hook: number of result slots bnx2_self_test() will fill. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5117
/* ethtool self-test handler.  buf[0..5] correspond to the entries in
 * bnx2_tests_str_arr; 0 means pass, non-zero means fail.  Offline
 * tests put the chip into diagnostic mode (stopping traffic), then
 * bring the NIC back up and wait for the link to return.  Online
 * tests (nvram/interrupt/link) run against the live configuration.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback result is reported directly in buf[2] */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* restore normal operation after the diag reset */
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		/* copper PHYs can take longer to renegotiate */
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5169
5170 static void
5171 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5172 {
5173         switch (stringset) {
5174         case ETH_SS_STATS:
5175                 memcpy(buf, bnx2_stats_str_arr,
5176                         sizeof(bnx2_stats_str_arr));
5177                 break;
5178         case ETH_SS_TEST:
5179                 memcpy(buf, bnx2_tests_str_arr,
5180                         sizeof(bnx2_tests_str_arr));
5181                 break;
5182         }
5183 }
5184
/* ethtool hook: number of u64 stats bnx2_get_ethtool_stats() reports. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5190
5191 static void
5192 bnx2_get_ethtool_stats(struct net_device *dev,
5193                 struct ethtool_stats *stats, u64 *buf)
5194 {
5195         struct bnx2 *bp = netdev_priv(dev);
5196         int i;
5197         u32 *hw_stats = (u32 *) bp->stats_blk;
5198         u8 *stats_len_arr = NULL;
5199
5200         if (hw_stats == NULL) {
5201                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5202                 return;
5203         }
5204
5205         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5206             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5207             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5208             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5209                 stats_len_arr = bnx2_5706_stats_len_arr;
5210         else
5211                 stats_len_arr = bnx2_5708_stats_len_arr;
5212
5213         for (i = 0; i < BNX2_NUM_STATS; i++) {
5214                 if (stats_len_arr[i] == 0) {
5215                         /* skip this counter */
5216                         buf[i] = 0;
5217                         continue;
5218                 }
5219                 if (stats_len_arr[i] == 4) {
5220                         /* 4-byte counter */
5221                         buf[i] = (u64)
5222                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5223                         continue;
5224                 }
5225                 /* 8-byte counter */
5226                 buf[i] = (((u64) *(hw_stats +
5227                                         bnx2_stats_offset_arr[i])) << 32) +
5228                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5229         }
5230 }
5231
5232 static int
5233 bnx2_phys_id(struct net_device *dev, u32 data)
5234 {
5235         struct bnx2 *bp = netdev_priv(dev);
5236         int i;
5237         u32 save;
5238
5239         if (data == 0)
5240                 data = 2;
5241
5242         save = REG_RD(bp, BNX2_MISC_CFG);
5243         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5244
5245         for (i = 0; i < (data * 2); i++) {
5246                 if ((i % 2) == 0) {
5247                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5248                 }
5249                 else {
5250                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5251                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5252                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5253                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5254                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5255                                 BNX2_EMAC_LED_TRAFFIC);
5256                 }
5257                 msleep_interruptible(500);
5258                 if (signal_pending(current))
5259                         break;
5260         }
5261         REG_WR(bp, BNX2_EMAC_LED, 0);
5262         REG_WR(bp, BNX2_MISC_CFG, save);
5263         return 0;
5264 }
5265
/* ethtool operations table; generic ethtool_op_* helpers are used
 * where the default behavior suffices.
 */
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5303
5304 /* Called with rtnl_lock */
5305 static int
5306 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5307 {
5308         struct mii_ioctl_data *data = if_mii(ifr);
5309         struct bnx2 *bp = netdev_priv(dev);
5310         int err;
5311
5312         switch(cmd) {
5313         case SIOCGMIIPHY:
5314                 data->phy_id = bp->phy_addr;
5315
5316                 /* fallthru */
5317         case SIOCGMIIREG: {
5318                 u32 mii_regval;
5319
5320                 spin_lock_bh(&bp->phy_lock);
5321                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5322                 spin_unlock_bh(&bp->phy_lock);
5323
5324                 data->val_out = mii_regval;
5325
5326                 return err;
5327         }
5328
5329         case SIOCSMIIREG:
5330                 if (!capable(CAP_NET_ADMIN))
5331                         return -EPERM;
5332
5333                 spin_lock_bh(&bp->phy_lock);
5334                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5335                 spin_unlock_bh(&bp->phy_lock);
5336
5337                 return err;
5338
5339         default:
5340                 /* do nothing */
5341                 break;
5342         }
5343         return -EOPNOTSUPP;
5344 }
5345
5346 /* Called with rtnl_lock */
5347 static int
5348 bnx2_change_mac_addr(struct net_device *dev, void *p)
5349 {
5350         struct sockaddr *addr = p;
5351         struct bnx2 *bp = netdev_priv(dev);
5352
5353         if (!is_valid_ether_addr(addr->sa_data))
5354                 return -EINVAL;
5355
5356         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5357         if (netif_running(dev))
5358                 bnx2_set_mac_addr(bp);
5359
5360         return 0;
5361 }
5362
5363 /* Called with rtnl_lock */
5364 static int
5365 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5366 {
5367         struct bnx2 *bp = netdev_priv(dev);
5368
5369         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5370                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5371                 return -EINVAL;
5372
5373         dev->mtu = new_mtu;
5374         if (netif_running(dev)) {
5375                 bnx2_netif_stop(bp);
5376
5377                 bnx2_init_nic(bp);
5378
5379                 bnx2_netif_start(bp);
5380         }
5381         return 0;
5382 }
5383
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: invoke the interrupt handler with the device IRQ
 * disabled so netconsole/kgdb-over-ethernet can drain the rings
 * without relying on interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
5395
5396 static int __devinit
5397 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5398 {
5399         struct bnx2 *bp;
5400         unsigned long mem_len;
5401         int rc;
5402         u32 reg;
5403
5404         SET_MODULE_OWNER(dev);
5405         SET_NETDEV_DEV(dev, &pdev->dev);
5406         bp = netdev_priv(dev);
5407
5408         bp->flags = 0;
5409         bp->phy_flags = 0;
5410
5411         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5412         rc = pci_enable_device(pdev);
5413         if (rc) {
5414                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5415                 goto err_out;
5416         }
5417
5418         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5419                 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5420                        "aborting.\n");
5421                 rc = -ENODEV;
5422                 goto err_out_disable;
5423         }
5424
5425         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5426         if (rc) {
5427                 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5428                 goto err_out_disable;
5429         }
5430
5431         pci_set_master(pdev);
5432
5433         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5434         if (bp->pm_cap == 0) {
5435                 printk(KERN_ERR PFX "Cannot find power management capability, "
5436                                "aborting.\n");
5437                 rc = -EIO;
5438                 goto err_out_release;
5439         }
5440
5441         bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5442         if (bp->pcix_cap == 0) {
5443                 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5444                 rc = -EIO;
5445                 goto err_out_release;
5446         }
5447
5448         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5449                 bp->flags |= USING_DAC_FLAG;
5450                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5451                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5452                                "failed, aborting.\n");
5453                         rc = -EIO;
5454                         goto err_out_release;
5455                 }
5456         }
5457         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5458                 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5459                 rc = -EIO;
5460                 goto err_out_release;
5461         }
5462
5463         bp->dev = dev;
5464         bp->pdev = pdev;
5465
5466         spin_lock_init(&bp->phy_lock);
5467         spin_lock_init(&bp->tx_lock);
5468         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5469
5470         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5471         mem_len = MB_GET_CID_ADDR(17);
5472         dev->mem_end = dev->mem_start + mem_len;
5473         dev->irq = pdev->irq;
5474
5475         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5476
5477         if (!bp->regview) {
5478                 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5479                 rc = -ENOMEM;
5480                 goto err_out_release;
5481         }
5482
5483         /* Configure byte swap and enable write to the reg_window registers.
5484          * Rely on CPU to do target byte swapping on big endian systems
5485          * The chip's target access swapping will not swap all accesses
5486          */
5487         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5488                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5489                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5490
5491         bnx2_set_power_state(bp, PCI_D0);
5492
5493         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5494
5495         /* Get bus information. */
5496         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5497         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5498                 u32 clkreg;
5499
5500                 bp->flags |= PCIX_FLAG;
5501
5502                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5503                 
5504                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5505                 switch (clkreg) {
5506                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5507                         bp->bus_speed_mhz = 133;
5508                         break;
5509
5510                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5511                         bp->bus_speed_mhz = 100;
5512                         break;
5513
5514                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5515                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5516                         bp->bus_speed_mhz = 66;
5517                         break;
5518
5519                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5520                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5521                         bp->bus_speed_mhz = 50;
5522                         break;
5523
5524                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5525                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5526                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5527                         bp->bus_speed_mhz = 33;
5528                         break;
5529                 }
5530         }
5531         else {
5532                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5533                         bp->bus_speed_mhz = 66;
5534                 else
5535                         bp->bus_speed_mhz = 33;
5536         }
5537
5538         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5539                 bp->flags |= PCI_32BIT_FLAG;
5540
5541         /* 5706A0 may falsely detect SERR and PERR. */
5542         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5543                 reg = REG_RD(bp, PCI_COMMAND);
5544                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5545                 REG_WR(bp, PCI_COMMAND, reg);
5546         }
5547         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5548                 !(bp->flags & PCIX_FLAG)) {
5549
5550                 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5551                        "aborting.\n");
5552                 goto err_out_unmap;
5553         }
5554
5555         bnx2_init_nvram(bp);
5556
5557         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5558
5559         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5560             BNX2_SHM_HDR_SIGNATURE_SIG)
5561                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5562         else
5563                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5564
5565         /* Get the permanent MAC address.  First we need to make sure the
5566          * firmware is actually running.
5567          */
5568         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5569
5570         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5571             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5572                 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5573                 rc = -ENODEV;
5574                 goto err_out_unmap;
5575         }
5576
5577         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5578
5579         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5580         bp->mac_addr[0] = (u8) (reg >> 8);
5581         bp->mac_addr[1] = (u8) reg;
5582
5583         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5584         bp->mac_addr[2] = (u8) (reg >> 24);
5585         bp->mac_addr[3] = (u8) (reg >> 16);
5586         bp->mac_addr[4] = (u8) (reg >> 8);
5587         bp->mac_addr[5] = (u8) reg;
5588
5589         bp->tx_ring_size = MAX_TX_DESC_CNT;
5590         bnx2_set_rx_ring_size(bp, 100);
5591
5592         bp->rx_csum = 1;
5593
5594         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5595
5596         bp->tx_quick_cons_trip_int = 20;
5597         bp->tx_quick_cons_trip = 20;
5598         bp->tx_ticks_int = 80;
5599         bp->tx_ticks = 80;
5600                 
5601         bp->rx_quick_cons_trip_int = 6;
5602         bp->rx_quick_cons_trip = 6;
5603         bp->rx_ticks_int = 18;
5604         bp->rx_ticks = 18;
5605
5606         bp->stats_ticks = 1000000 & 0xffff00;
5607
5608         bp->timer_interval =  HZ;
5609         bp->current_interval =  HZ;
5610
5611         bp->phy_addr = 1;
5612
5613         /* Disable WOL support if we are running on a SERDES chip. */
5614         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5615                 bp->phy_flags |= PHY_SERDES_FLAG;
5616                 bp->flags |= NO_WOL_FLAG;
5617                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5618                         bp->phy_addr = 2;
5619                         reg = REG_RD_IND(bp, bp->shmem_base +
5620                                          BNX2_SHARED_HW_CFG_CONFIG);
5621                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5622                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5623                 }
5624         }
5625
5626         if (CHIP_NUM(bp) == CHIP_NUM_5708)
5627                 bp->flags |= NO_WOL_FLAG;
5628
5629         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5630                 bp->tx_quick_cons_trip_int =
5631                         bp->tx_quick_cons_trip;
5632                 bp->tx_ticks_int = bp->tx_ticks;
5633                 bp->rx_quick_cons_trip_int =
5634                         bp->rx_quick_cons_trip;
5635                 bp->rx_ticks_int = bp->rx_ticks;
5636                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5637                 bp->com_ticks_int = bp->com_ticks;
5638                 bp->cmd_ticks_int = bp->cmd_ticks;
5639         }
5640
5641         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5642         bp->req_line_speed = 0;
5643         if (bp->phy_flags & PHY_SERDES_FLAG) {
5644                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5645
5646                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5647                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5648                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5649                         bp->autoneg = 0;
5650                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5651                         bp->req_duplex = DUPLEX_FULL;
5652                 }
5653         }
5654         else {
5655                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5656         }
5657
5658         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5659
5660         init_timer(&bp->timer);
5661         bp->timer.expires = RUN_AT(bp->timer_interval);
5662         bp->timer.data = (unsigned long) bp;
5663         bp->timer.function = bnx2_timer;
5664
5665         return 0;
5666
5667 err_out_unmap:
5668         if (bp->regview) {
5669                 iounmap(bp->regview);
5670                 bp->regview = NULL;
5671         }
5672
5673 err_out_release:
5674         pci_release_regions(pdev);
5675
5676 err_out_disable:
5677         pci_disable_device(pdev);
5678         pci_set_drvdata(pdev, NULL);
5679
5680 err_out:
5681         return rc;
5682 }
5683
5684 static int __devinit
5685 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5686 {
5687         static int version_printed = 0;
5688         struct net_device *dev = NULL;
5689         struct bnx2 *bp;
5690         int rc, i;
5691
5692         if (version_printed++ == 0)
5693                 printk(KERN_INFO "%s", version);
5694
5695         /* dev zeroed in init_etherdev */
5696         dev = alloc_etherdev(sizeof(*bp));
5697
5698         if (!dev)
5699                 return -ENOMEM;
5700
5701         rc = bnx2_init_board(pdev, dev);
5702         if (rc < 0) {
5703                 free_netdev(dev);
5704                 return rc;
5705         }
5706
5707         dev->open = bnx2_open;
5708         dev->hard_start_xmit = bnx2_start_xmit;
5709         dev->stop = bnx2_close;
5710         dev->get_stats = bnx2_get_stats;
5711         dev->set_multicast_list = bnx2_set_rx_mode;
5712         dev->do_ioctl = bnx2_ioctl;
5713         dev->set_mac_address = bnx2_change_mac_addr;
5714         dev->change_mtu = bnx2_change_mtu;
5715         dev->tx_timeout = bnx2_tx_timeout;
5716         dev->watchdog_timeo = TX_TIMEOUT;
5717 #ifdef BCM_VLAN
5718         dev->vlan_rx_register = bnx2_vlan_rx_register;
5719         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5720 #endif
5721         dev->poll = bnx2_poll;
5722         dev->ethtool_ops = &bnx2_ethtool_ops;
5723         dev->weight = 64;
5724
5725         bp = netdev_priv(dev);
5726
5727 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5728         dev->poll_controller = poll_bnx2;
5729 #endif
5730
5731         if ((rc = register_netdev(dev))) {
5732                 printk(KERN_ERR PFX "Cannot register net device\n");
5733                 if (bp->regview)
5734                         iounmap(bp->regview);
5735                 pci_release_regions(pdev);
5736                 pci_disable_device(pdev);
5737                 pci_set_drvdata(pdev, NULL);
5738                 free_netdev(dev);
5739                 return rc;
5740         }
5741
5742         pci_set_drvdata(pdev, dev);
5743
5744         memcpy(dev->dev_addr, bp->mac_addr, 6);
5745         memcpy(dev->perm_addr, bp->mac_addr, 6);
5746         bp->name = board_info[ent->driver_data].name,
5747         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5748                 "IRQ %d, ",
5749                 dev->name,
5750                 bp->name,
5751                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5752                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5753                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5754                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5755                 bp->bus_speed_mhz,
5756                 dev->base_addr,
5757                 bp->pdev->irq);
5758
5759         printk("node addr ");
5760         for (i = 0; i < 6; i++)
5761                 printk("%2.2x", dev->dev_addr[i]);
5762         printk("\n");
5763
5764         dev->features |= NETIF_F_SG;
5765         if (bp->flags & USING_DAC_FLAG)
5766                 dev->features |= NETIF_F_HIGHDMA;
5767         dev->features |= NETIF_F_IP_CSUM;
5768 #ifdef BCM_VLAN
5769         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5770 #endif
5771 #ifdef BCM_TSO
5772         dev->features |= NETIF_F_TSO;
5773 #endif
5774
5775         netif_carrier_off(bp->dev);
5776
5777         return 0;
5778 }
5779
/* PCI remove: undo bnx2_init_one/bnx2_init_board in reverse order.
 * The shared workqueue is flushed first so a pending reset_task cannot
 * run against a device that is being torn down.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
5798
/* PCI suspend: quiesce the interface, tell the bootcode whether WOL
 * is wanted via the reset code, free all rx/tx buffers and drop into
 * the PCI power state chosen for `state`.  A non-running interface
 * needs nothing.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	/* make sure no reset_task is in flight before stopping */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
5824
/* PCI resume: return the chip to D0 and fully reinitialize it (the
 * suspend path reset the chip and freed all buffers).  A non-running
 * interface needs nothing.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
5840
/* PCI driver glue: probe/remove plus power management entry points. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
5849
/* Module init: register the PCI driver; probing happens per device. */
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}
5854
/* Module exit: unregister the driver, detaching all bound devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
5859
5860 module_init(bnx2_init);
5861 module_exit(bnx2_cleanup);
5862
5863
5864