/* [BNX2]: Fix link change handling
 * (from linux-2.6.git / drivers / net / bnx2.c)
 */
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
11
12 #include "bnx2.h"
13 #include "bnx2_fw.h"
14
15 #define DRV_MODULE_NAME         "bnx2"
16 #define PFX DRV_MODULE_NAME     ": "
17 #define DRV_MODULE_VERSION      "1.4.38"
18 #define DRV_MODULE_RELDATE      "February 10, 2006"
19
20 #define RUN_AT(x) (jiffies + (x))
21
22 /* Time in jiffies before concluding the transmitter is hung. */
23 #define TX_TIMEOUT  (5*HZ)
24
25 static char version[] __devinitdata =
26         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
29 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION);
32
33 static int disable_msi = 0;
34
35 module_param(disable_msi, int, 0);
36 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
37
/* Board index into board_info[] below; the two tables must stay in
 * the same order.  HP NC370x entries are OEM variants of the
 * corresponding Broadcom parts (see bnx2_pci_tbl subsystem IDs). */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
} board_t;
47
/* indexed by board_t, above — printable board names used at probe time */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        };
60
/* PCI match table.  HP OEM boards are distinguished by their HP
 * subsystem vendor/device IDs and must precede the wildcard
 * (PCI_ANY_ID) entries for the same Broadcom device ID, since the PCI
 * core matches entries in table order.  driver_data is the board_t
 * index. */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { 0, }
};
78
/* Supported NVRAM/flash parts.
 * NOTE(review): field order follows struct flash_spec in bnx2.h —
 * presumably strapping value, controller config words, buffered flag,
 * page geometry, byte-address mask, total size and printable name;
 * confirm against the struct definition before relying on this. */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
165
166 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
167
168 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
169 {
170         u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
171
172         if (diff > MAX_TX_DESC_CNT)
173                 diff = (diff & MAX_TX_DESC_CNT) - 1;
174         return (bp->tx_ring_size - diff);
175 }
176
/* Indirect register read: program the PCI config window address, then
 * read the value back through the window.
 * NOTE(review): no locking visible here — callers presumably
 * serialize access to the window register pair. */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
183
/* Indirect register write: counterpart of bnx2_reg_rd_ind() — set the
 * window address, then write the value through the window. */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
190
/* Write one 32-bit word into on-chip context memory at
 * cid_addr + offset, via the CTX data-address/data register pair. */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
        REG_WR(bp, BNX2_CTX_DATA, val);
}
198
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the EMAC is auto-polling the PHY, polling is disabled for the
 * duration of the transaction and re-enabled afterwards.  Returns 0
 * on success; on timeout returns -EBUSY and sets *@val to 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Turn off hardware auto-polling so we can own the
                 * MDIO interface. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back — presumably flushes the posted write. */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Issue the read command: PHY address, register, READ op. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion, up to 50 * 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Still busy after the polling loop: timed out. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
255
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * transaction when enabled.  Returns 0 on success or -EBUSY if the
 * MDIO cycle did not complete within the polling loop.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Suspend hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back — presumably flushes the posted write. */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Issue the write command with the data in the low bits. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion, up to 50 * 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
304
/* Mask the device interrupt.  The trailing read of the same register
 * discards its value — presumably a posted-write flush so the mask
 * takes effect before we return. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
312
/* Unmask the device interrupt and acknowledge up to the last seen
 * status block index.  Two writes are issued: the first acks the
 * index while still masked, the second unmasks.  The final
 * COAL_NOW command forces the host coalescing block to generate an
 * interrupt if events are already pending. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
325
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path can see that
 * interrupts are logically off; paired with the atomic_dec_and_test()
 * in bnx2_netif_start(). */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
333
/* Quiesce the interface: disable/sync interrupts, then stop NAPI
 * polling and the tx queue.  trans_start is refreshed so the watchdog
 * does not fire a spurious tx timeout while we are stopped. */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
344
/* Undo bnx2_netif_stop().  Only re-enables the datapath when the
 * intr_sem count drops back to zero, so nested stop/start pairs
 * balance correctly. */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        netif_poll_enable(bp->dev);
                        bnx2_enable_int(bp);
                }
        }
}
356
357 static void
358 bnx2_free_mem(struct bnx2 *bp)
359 {
360         int i;
361
362         if (bp->stats_blk) {
363                 pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
364                                     bp->stats_blk, bp->stats_blk_mapping);
365                 bp->stats_blk = NULL;
366         }
367         if (bp->status_blk) {
368                 pci_free_consistent(bp->pdev, sizeof(struct status_block),
369                                     bp->status_blk, bp->status_blk_mapping);
370                 bp->status_blk = NULL;
371         }
372         if (bp->tx_desc_ring) {
373                 pci_free_consistent(bp->pdev,
374                                     sizeof(struct tx_bd) * TX_DESC_CNT,
375                                     bp->tx_desc_ring, bp->tx_desc_mapping);
376                 bp->tx_desc_ring = NULL;
377         }
378         kfree(bp->tx_buf_ring);
379         bp->tx_buf_ring = NULL;
380         for (i = 0; i < bp->rx_max_ring; i++) {
381                 if (bp->rx_desc_ring[i])
382                         pci_free_consistent(bp->pdev,
383                                             sizeof(struct rx_bd) * RX_DESC_CNT,
384                                             bp->rx_desc_ring[i],
385                                             bp->rx_desc_mapping[i]);
386                 bp->rx_desc_ring[i] = NULL;
387         }
388         vfree(bp->rx_buf_ring);
389         bp->rx_buf_ring = NULL;
390 }
391
392 static int
393 bnx2_alloc_mem(struct bnx2 *bp)
394 {
395         int i;
396
397         bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
398                                      GFP_KERNEL);
399         if (bp->tx_buf_ring == NULL)
400                 return -ENOMEM;
401
402         memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
403         bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
404                                                 sizeof(struct tx_bd) *
405                                                 TX_DESC_CNT,
406                                                 &bp->tx_desc_mapping);
407         if (bp->tx_desc_ring == NULL)
408                 goto alloc_mem_err;
409
410         bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
411                                   bp->rx_max_ring);
412         if (bp->rx_buf_ring == NULL)
413                 goto alloc_mem_err;
414
415         memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
416                                    bp->rx_max_ring);
417
418         for (i = 0; i < bp->rx_max_ring; i++) {
419                 bp->rx_desc_ring[i] =
420                         pci_alloc_consistent(bp->pdev,
421                                              sizeof(struct rx_bd) * RX_DESC_CNT,
422                                              &bp->rx_desc_mapping[i]);
423                 if (bp->rx_desc_ring[i] == NULL)
424                         goto alloc_mem_err;
425
426         }
427
428         bp->status_blk = pci_alloc_consistent(bp->pdev,
429                                               sizeof(struct status_block),
430                                               &bp->status_blk_mapping);
431         if (bp->status_blk == NULL)
432                 goto alloc_mem_err;
433
434         memset(bp->status_blk, 0, sizeof(struct status_block));
435
436         bp->stats_blk = pci_alloc_consistent(bp->pdev,
437                                              sizeof(struct statistics_block),
438                                              &bp->stats_blk_mapping);
439         if (bp->stats_blk == NULL)
440                 goto alloc_mem_err;
441
442         memset(bp->stats_blk, 0, sizeof(struct statistics_block));
443
444         return 0;
445
446 alloc_mem_err:
447         bnx2_free_mem(bp);
448         return -ENOMEM;
449 }
450
451 static void
452 bnx2_report_fw_link(struct bnx2 *bp)
453 {
454         u32 fw_link_status = 0;
455
456         if (bp->link_up) {
457                 u32 bmsr;
458
459                 switch (bp->line_speed) {
460                 case SPEED_10:
461                         if (bp->duplex == DUPLEX_HALF)
462                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
463                         else
464                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
465                         break;
466                 case SPEED_100:
467                         if (bp->duplex == DUPLEX_HALF)
468                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
469                         else
470                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
471                         break;
472                 case SPEED_1000:
473                         if (bp->duplex == DUPLEX_HALF)
474                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
475                         else
476                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
477                         break;
478                 case SPEED_2500:
479                         if (bp->duplex == DUPLEX_HALF)
480                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
481                         else
482                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
483                         break;
484                 }
485
486                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
487
488                 if (bp->autoneg) {
489                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
490
491                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
492                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
493
494                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
495                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
496                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
497                         else
498                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
499                 }
500         }
501         else
502                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
503
504         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
505 }
506
507 static void
508 bnx2_report_link(struct bnx2 *bp)
509 {
510         if (bp->link_up) {
511                 netif_carrier_on(bp->dev);
512                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
513
514                 printk("%d Mbps ", bp->line_speed);
515
516                 if (bp->duplex == DUPLEX_FULL)
517                         printk("full duplex");
518                 else
519                         printk("half duplex");
520
521                 if (bp->flow_ctrl) {
522                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
523                                 printk(", receive ");
524                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
525                                         printk("& transmit ");
526                         }
527                         else {
528                                 printk(", transmit ");
529                         }
530                         printk("flow control ON");
531                 }
532                 printk("\n");
533         }
534         else {
535                 netif_carrier_off(bp->dev);
536                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
537         }
538
539         bnx2_report_fw_link(bp);
540 }
541
/* Resolve the effective rx/tx pause configuration into bp->flow_ctrl.
 *
 * If flow control was not autonegotiated, the requested setting is
 * applied directly (full duplex only).  On a 5708 SerDes the chip
 * reports the resolved pause result itself; otherwise the result is
 * derived from the local and link-partner advertisements per the
 * IEEE 802.3 pause resolution table (Table 28B-3).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* Both speed and flow-control autoneg must be on to use the
         * negotiated result; otherwise honor the requested setting. */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                /* 5708 SerDes: the resolved pause bits can be read
                 * straight from the 1000X status register. */
                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Map the 1000Base-X pause bits onto the copper-style
                 * PAUSE_CAP/PAUSE_ASYM bits so one resolution table
                 * below serves both media types. */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
617
618 static int
619 bnx2_5708s_linkup(struct bnx2 *bp)
620 {
621         u32 val;
622
623         bp->link_up = 1;
624         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
625         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
626                 case BCM5708S_1000X_STAT1_SPEED_10:
627                         bp->line_speed = SPEED_10;
628                         break;
629                 case BCM5708S_1000X_STAT1_SPEED_100:
630                         bp->line_speed = SPEED_100;
631                         break;
632                 case BCM5708S_1000X_STAT1_SPEED_1G:
633                         bp->line_speed = SPEED_1000;
634                         break;
635                 case BCM5708S_1000X_STAT1_SPEED_2G5:
636                         bp->line_speed = SPEED_2500;
637                         break;
638         }
639         if (val & BCM5708S_1000X_STAT1_FD)
640                 bp->duplex = DUPLEX_FULL;
641         else
642                 bp->duplex = DUPLEX_HALF;
643
644         return 0;
645 }
646
647 static int
648 bnx2_5706s_linkup(struct bnx2 *bp)
649 {
650         u32 bmcr, local_adv, remote_adv, common;
651
652         bp->link_up = 1;
653         bp->line_speed = SPEED_1000;
654
655         bnx2_read_phy(bp, MII_BMCR, &bmcr);
656         if (bmcr & BMCR_FULLDPLX) {
657                 bp->duplex = DUPLEX_FULL;
658         }
659         else {
660                 bp->duplex = DUPLEX_HALF;
661         }
662
663         if (!(bmcr & BMCR_ANENABLE)) {
664                 return 0;
665         }
666
667         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
668         bnx2_read_phy(bp, MII_LPA, &remote_adv);
669
670         common = local_adv & remote_adv;
671         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
672
673                 if (common & ADVERTISE_1000XFULL) {
674                         bp->duplex = DUPLEX_FULL;
675                 }
676                 else {
677                         bp->duplex = DUPLEX_HALF;
678                 }
679         }
680
681         return 0;
682 }
683
/* Link-up handler for copper PHYs: derive bp->line_speed and
 * bp->duplex.  With autoneg enabled, the 1000Base-T registers are
 * checked first, falling back to the 10/100 advertisement registers;
 * if nothing matches, the link is declared down.  With autoneg off,
 * speed/duplex come straight from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* Partner's 1000Base-T ability bits in STAT1000 sit
                 * two bits above the local ones in CTRL1000, hence
                 * the shift before intersecting. */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match: fall back to 10/100. */
                        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
                        bnx2_read_phy(bp, MII_LPA, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* Nothing in common with the partner. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Forced mode: decode speed and duplex from BMCR. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
749
/* Program the EMAC to match the resolved link parameters in bp:
 * port mode (MII/GMII/2.5G), duplex, tx inter-frame gap, and rx/tx
 * PAUSE enables.  Finishes by acknowledging the link-change status
 * bit in the EMAC.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* Default tx lengths; widened below for 1G half duplex. */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* Only the 5708 has a distinct 10M
                                 * MII port mode. */
                                if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* Link down: leave the port in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}
816
/* Central link-change handler: sample the PHY link state, update
 * bp->link_up/line_speed/duplex/flow_ctrl, report transitions, and
 * reprogram the MAC via bnx2_set_mac_link().  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In MAC loopback the link is up by definition. */
        if (bp->loopback == MAC_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        link_up = bp->link_up;

        /* Read BMSR twice so the second read reflects current state
         * rather than latched bits. */
        bnx2_read_phy(bp, MII_BMSR, &bmsr);
        bnx2_read_phy(bp, MII_BMSR, &bmsr);

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                /* 5706 SerDes: trust the EMAC's view of the link and
                 * override the BMSR link bit accordingly. */
                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Dispatch to the PHY-specific link-up decoder. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link is down.  On SerDes with autoneg requested,
                 * make sure autoneg is re-enabled in the PHY so the
                 * next negotiation can proceed. */
                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                        (bp->autoneg & AUTONEG_SPEED)) {

                        u32 bmcr;

                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
                        if (!(bmcr & BMCR_ANENABLE)) {
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                        BMCR_ANENABLE);
                        }
                }
                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                bp->link_up = 0;
        }

        /* Only log and notify the bootcode on an actual transition. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
882
883 static int
884 bnx2_reset_phy(struct bnx2 *bp)
885 {
886         int i;
887         u32 reg;
888
889         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
890
891 #define PHY_RESET_MAX_WAIT 100
892         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
893                 udelay(10);
894
895                 bnx2_read_phy(bp, MII_BMCR, &reg);
896                 if (!(reg & BMCR_RESET)) {
897                         udelay(20);
898                         break;
899                 }
900         }
901         if (i == PHY_RESET_MAX_WAIT) {
902                 return -EBUSY;
903         }
904         return 0;
905 }
906
907 static u32
908 bnx2_phy_get_pause_adv(struct bnx2 *bp)
909 {
910         u32 adv = 0;
911
912         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
913                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
914
915                 if (bp->phy_flags & PHY_SERDES_FLAG) {
916                         adv = ADVERTISE_1000XPAUSE;
917                 }
918                 else {
919                         adv = ADVERTISE_PAUSE_CAP;
920                 }
921         }
922         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
923                 if (bp->phy_flags & PHY_SERDES_FLAG) {
924                         adv = ADVERTISE_1000XPSE_ASYM;
925                 }
926                 else {
927                         adv = ADVERTISE_PAUSE_ASYM;
928                 }
929         }
930         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
931                 if (bp->phy_flags & PHY_SERDES_FLAG) {
932                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
933                 }
934                 else {
935                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
936                 }
937         }
938         return adv;
939 }
940
/* Configure a SerDes PHY according to the requested link parameters.
 *
 * Forced-speed mode: disables 2.5G on the 5708, programs the
 * advertisement and BMCR for the forced speed/duplex, and bounces the
 * link if anything changed so the partner sees the transition.
 *
 * Autoneg mode: enables 2.5G where capable, rewrites the advertisement
 * (1000XFULL plus pause bits) and restarts autonegotiation if anything
 * changed.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
        u32 adv, bmcr, up1;
        u32 new_adv = 0;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                u32 new_bmcr;
                int force_link_down = 0;

                /* Forced speed is incompatible with the 2.5G setting;
                 * turn it off and bounce the link if it was on.
                 */
                if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                        if (up1 & BCM5708S_UP1_2G5) {
                                up1 &= ~BCM5708S_UP1_2G5;
                                bnx2_write_phy(bp, BCM5708S_UP1, up1);
                                force_link_down = 1;
                        }
                }

                bnx2_read_phy(bp, MII_ADVERTISE, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, MII_BMCR, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;
                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                /* Briefly advertise nothing and restart
                                 * autoneg so the partner drops the link
                                 * before the forced settings take hold.
                                 */
                                bnx2_write_phy(bp, MII_ADVERTISE, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, MII_BMCR, new_bmcr);
                        }
                        bnx2_write_phy(bp, MII_ADVERTISE, adv);
                        bnx2_write_phy(bp, MII_BMCR, new_bmcr);
                }
                return 0;
        }

        /* Autoneg path: enable 2.5G advertisement where capable. */
        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                up1 |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, up1);
        }

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, MII_ADVERTISE, &adv);
        bnx2_read_phy(bp, MII_BMCR, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        int i;

                        /* Loopback drops the link; hold it down ~11ms
                         * so the partner notices before autoneg
                         * restarts.
                         */
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 110; i++) {
                                udelay(100);
                        }
                }

                bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
                bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                if (CHIP_NUM(bp) == CHIP_NUM_5706) {
                        /* Speed up link-up time when the link partner
                         * does not autonegotiate which is very common
                         * in blade servers. Some blade servers use
                         * IPMI for keyboard input and it's important
                         * to minimize link disruptions. Autoneg. involves
                         * exchanging base pages plus 3 next pages and
                         * normally completes in about 120 msec.
                         */
                        bp->current_interval = SERDES_AN_TIMEOUT;
                        bp->serdes_an_pending = 1;
                        mod_timer(&bp->timer, jiffies + bp->current_interval);
                }
        }

        return 0;
}
1039
1040 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1041         (ADVERTISED_1000baseT_Full)
1042
1043 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1044         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1045         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1046         ADVERTISED_1000baseT_Full)
1047
1048 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1049         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1050         
1051 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1052
/* Configure a copper PHY according to the requested link parameters.
 *
 * Autoneg mode: rebuilds the 10/100 advertisement (MII_ADVERTISE) and
 * the 1000 advertisement (MII_CTRL1000) from bp->advertising plus the
 * pause bits, restarting autoneg only if something changed.
 *
 * Forced mode: programs BMCR for the forced speed/duplex, bouncing the
 * link first (via loopback) so the partner sees the change.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Keep only the bits we manage so the change
                 * comparison below is meaningful.
                 */
                bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Build the new advertisement from the ethtool mask. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Restart autoneg only when the advertisement changed
                 * or autoneg was previously disabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;
                int i = 0;

                /* BMSR link status is latched; read twice to get the
                 * current state.
                 */
                bnx2_read_phy(bp, MII_BMSR, &bmsr);
                bnx2_read_phy(bp, MII_BMSR, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                        /* Poll up to ~62ms for link to actually drop. */
                        do {
                                udelay(100);
                                bnx2_read_phy(bp, MII_BMSR, &bmsr);
                                bnx2_read_phy(bp, MII_BMSR, &bmsr);
                                i++;
                        } while ((bmsr & BMSR_LSTATUS) && (i < 620));
                }

                bnx2_write_phy(bp, MII_BMCR, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        }
        return 0;
}
1147
1148 static int
1149 bnx2_setup_phy(struct bnx2 *bp)
1150 {
1151         if (bp->loopback == MAC_LOOPBACK)
1152                 return 0;
1153
1154         if (bp->phy_flags & PHY_SERDES_FLAG) {
1155                 return (bnx2_setup_serdes_phy(bp));
1156         }
1157         else {
1158                 return (bnx2_setup_copper_phy(bp));
1159         }
1160 }
1161
/* One-time init for the 5708 SerDes PHY: select IEEE-compliant
 * digital-block behavior, enable fiber mode with autodetect, enable
 * parallel detect, advertise 2.5G where capable, and apply
 * board-specific TX tuning from shared-memory config.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
        u32 val;

        /* Select IEEE-compliant behavior in the DIGITAL3 block, then
         * switch back to the DIGITAL block.
         */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        /* Fiber mode with automatic media detection. */
        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        /* Enable parallel detection. */
        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        /* Early 5708 steppings need a stronger TX signal. */
        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Apply the TX control value from shared-memory config, but
         * only on backplane designs.
         */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = REG_RD_IND(bp, bp->shmem_base +
                                          BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
1215
/* One-time init for the 5706 SerDes PHY: clears the parallel-detect
 * state and programs extended packet length based on the MTU.
 * NOTE(review): 0x18/0x1c look like shadow/expansion registers; the
 * magic values come from Broadcom -- do not reorder.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

        if (CHIP_NUM(bp) == CHIP_NUM_5706) {
                REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
        }

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Standard MTU: clear the extended packet length bit. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
1251
1252 static int
1253 bnx2_init_copper_phy(struct bnx2 *bp)
1254 {
1255         u32 val;
1256
1257         bp->phy_flags |= PHY_CRC_FIX_FLAG;
1258
1259         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1260                 bnx2_write_phy(bp, 0x18, 0x0c00);
1261                 bnx2_write_phy(bp, 0x17, 0x000a);
1262                 bnx2_write_phy(bp, 0x15, 0x310b);
1263                 bnx2_write_phy(bp, 0x17, 0x201f);
1264                 bnx2_write_phy(bp, 0x15, 0x9506);
1265                 bnx2_write_phy(bp, 0x17, 0x401f);
1266                 bnx2_write_phy(bp, 0x15, 0x14e2);
1267                 bnx2_write_phy(bp, 0x18, 0x0400);
1268         }
1269
1270         if (bp->dev->mtu > 1500) {
1271                 /* Set extended packet length bit */
1272                 bnx2_write_phy(bp, 0x18, 0x7);
1273                 bnx2_read_phy(bp, 0x18, &val);
1274                 bnx2_write_phy(bp, 0x18, val | 0x4000);
1275
1276                 bnx2_read_phy(bp, 0x10, &val);
1277                 bnx2_write_phy(bp, 0x10, val | 0x1);
1278         }
1279         else {
1280                 bnx2_write_phy(bp, 0x18, 0x7);
1281                 bnx2_read_phy(bp, 0x18, &val);
1282                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1283
1284                 bnx2_read_phy(bp, 0x10, &val);
1285                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1286         }
1287
1288         /* ethernet@wirespeed */
1289         bnx2_write_phy(bp, 0x18, 0x7007);
1290         bnx2_read_phy(bp, 0x18, &val);
1291         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1292         return 0;
1293 }
1294
1295
/* Reset and initialize the PHY: select link-ready interrupt mode, read
 * the PHY ID, run the chip-specific PHY init, then apply the current
 * link settings via bnx2_setup_phy().  Returns the init routine's
 * status (0 on success).
 * NOTE(review): appears to assume the caller holds bp->phy_lock --
 * confirm against callers.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
        u32 val;
        int rc = 0;

        /* Interrupt on link-ready rather than the PHY interrupt pin. */
        bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
        bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        bnx2_reset_phy(bp);

        /* The 32-bit PHY ID is split across the two PHYSID registers. */
        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp);
        }
        else {
                rc = bnx2_init_copper_phy(bp);
        }

        bnx2_setup_phy(bp);

        return rc;
}
1328
1329 static int
1330 bnx2_set_mac_loopback(struct bnx2 *bp)
1331 {
1332         u32 mac_mode;
1333
1334         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1335         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1336         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1337         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1338         bp->link_up = 1;
1339         return 0;
1340 }
1341
1342 static int bnx2_test_link(struct bnx2 *);
1343
1344 static int
1345 bnx2_set_phy_loopback(struct bnx2 *bp)
1346 {
1347         u32 mac_mode;
1348         int rc, i;
1349
1350         spin_lock_bh(&bp->phy_lock);
1351         rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1352                             BMCR_SPEED1000);
1353         spin_unlock_bh(&bp->phy_lock);
1354         if (rc)
1355                 return rc;
1356
1357         for (i = 0; i < 10; i++) {
1358                 if (bnx2_test_link(bp) == 0)
1359                         break;
1360                 udelay(10);
1361         }
1362
1363         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1364         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1365                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1366                       BNX2_EMAC_MODE_25G);
1367
1368         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1369         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1370         bp->link_up = 1;
1371         return 0;
1372 }
1373
/* Send a command to the bootcode firmware through the shared-memory
 * driver mailbox and wait up to FW_ACK_TIME_OUT_MS for the matching
 * ack sequence number in the firmware mailbox.
 *
 * Returns 0 on success (and unconditionally for WAIT0 messages, which
 * don't require completion), -EBUSY on ack timeout (after notifying
 * the firmware), or -EIO if the firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
        int i;
        u32 val;

        /* Each message carries a fresh sequence number so the ack can
         * be matched to this request.
         */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require the firmware to finish. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
1416
1417 static void
1418 bnx2_init_context(struct bnx2 *bp)
1419 {
1420         u32 vcid;
1421
1422         vcid = 96;
1423         while (vcid) {
1424                 u32 vcid_addr, pcid_addr, offset;
1425
1426                 vcid--;
1427
1428                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1429                         u32 new_vcid;
1430
1431                         vcid_addr = GET_PCID_ADDR(vcid);
1432                         if (vcid & 0x8) {
1433                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1434                         }
1435                         else {
1436                                 new_vcid = vcid;
1437                         }
1438                         pcid_addr = GET_PCID_ADDR(new_vcid);
1439                 }
1440                 else {
1441                         vcid_addr = GET_CID_ADDR(vcid);
1442                         pcid_addr = vcid_addr;
1443                 }
1444
1445                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1446                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1447
1448                 /* Zero out the context. */
1449                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1450                         CTX_WR(bp, 0x00, offset, 0);
1451                 }
1452
1453                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1454                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1455         }
1456 }
1457
/* Work around bad on-chip rx buffer memory: allocate every free mbuf
 * from the RBUF pool, remember the good ones, then free only the good
 * ones back -- permanently removing the bad blocks (those whose
 * allocated address has bit 9 set) from the pool.
 *
 * Returns 0 on success, -ENOMEM if the temporary array can't be
 * allocated.
 * NOTE(review): good_mbuf holds at most 512 entries; assumes the
 * hardware never yields more than 512 good mbufs -- confirm.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                /* Encode the value the RBUF free register expects. */
                val = (val << 9) | val | 1;

                REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
1508
1509 static void
1510 bnx2_set_mac_addr(struct bnx2 *bp) 
1511 {
1512         u32 val;
1513         u8 *mac_addr = bp->dev->dev_addr;
1514
1515         val = (mac_addr[0] << 8) | mac_addr[1];
1516
1517         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1518
1519         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
1520                 (mac_addr[4] << 8) | mac_addr[5];
1521
1522         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1523 }
1524
/* Allocate an skb for rx ring slot 'index', align skb->data to 8
 * bytes, DMA-map it, and fill in the corresponding rx_bd with the
 * mapping.  Also advances bp->rx_prod_bseq.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = dev_alloc_skb(bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        /* Align skb->data on an 8-byte boundary. */
        if (unlikely((align = (unsigned long) skb->data & 0x7))) {
                skb_reserve(skb, 8 - align);
        }

        skb->dev = bp->dev;
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        /* The BD takes the 64-bit DMA address as two 32-bit halves. */
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        /* Running byte count of buffers posted to the producer. */
        bp->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}
1557
/* Handle a link-state attention from the status block.  Compares the
 * current link attention bit with its acked copy; on a change, acks
 * the new state through the PCICFG status bit set/clear commands (so
 * the ack bits track the attention bits) and re-evaluates the link.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
        u32 new_link_state, old_link_state;

        new_link_state = bp->status_blk->status_attn_bits &
                STATUS_ATTN_BITS_LINK_STATE;
        old_link_state = bp->status_blk->status_attn_bits_ack &
                STATUS_ATTN_BITS_LINK_STATE;
        if (new_link_state != old_link_state) {
                /* Ack the change in the direction it occurred. */
                if (new_link_state) {
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
                                STATUS_ATTN_BITS_LINK_STATE);
                }
                else {
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
                                STATUS_ATTN_BITS_LINK_STATE);
                }
                bnx2_set_link(bp);
        }
}
1579
/* Reclaim completed tx descriptors up to the hardware consumer index:
 * unmap each packet's head and fragments, free the skbs, and wake the
 * tx queue if it was stopped and enough descriptors are now free.
 * Runs in the interrupt/softirq path (uses dev_kfree_skb_irq).
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_free_bd = 0;

        hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
        /* The last entry of each ring page holds the next-page pointer
         * and is never a packet BD; step over it.
         */
        if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &bp->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;
#ifdef BCM_TSO 
                /* partial BD completions possible with TSO packets */
                if (skb_shinfo(skb)->tso_size) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Stop if this packet's final BD has not
                         * completed yet (signed compare handles
                         * index wrap).
                         */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }
#endif
                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* Unmap each fragment's BD. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                tx_free_bd += last + 1;

                dev_kfree_skb_irq(skb);

                /* Re-read the consumer index in case more packets
                 * completed while we were reclaiming.
                 */
                hw_cons = bp->hw_tx_cons =
                        sblk->status_tx_quick_consumer_index0;

                if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                        hw_cons++;
                }
        }

        bp->tx_cons = sw_cons;

        /* Wake the queue under tx_lock, re-checking the stopped state
         * to avoid racing with the transmit path.
         */
        if (unlikely(netif_queue_stopped(bp->dev))) {
                spin_lock(&bp->tx_lock);
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

                        netif_wake_queue(bp->dev);
                }
                spin_unlock(&bp->tx_lock);
        }
}
1662
/* Recycle an rx skb from ring slot 'cons' to slot 'prod' without
 * unmapping it (used when a packet is dropped or copied out).  Hands
 * the header area back to the device and transfers the skb, its DMA
 * mapping, and the BD address to the producer slot.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
        u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &bp->rx_buf_ring[cons];
        prod_rx_buf = &bp->rx_buf_ring[prod];

        /* Give the header portion back to the device after the CPU
         * has looked at it.
         */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        bp->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: mapping and BD address are already in place. */
        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1692
/* Service completed RX descriptors, up to @budget packets.  Good
 * frames are passed up the stack (with VLAN/checksum offload info
 * where available); errored or unallocatable ones are recycled back
 * onto the ring.  Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last index of each ring page is skipped.  NOTE(review):
	 * presumably that slot holds the next-page pointer BD — confirm
	 * against the ring setup code.
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header region to the CPU; the whole
		 * buffer is unmapped later if we keep this skb.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr status header at the start
		 * of the buffer, before the packet data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* minus trailing 4 bytes (presumably FCS) */

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			/* Original buffer goes back onto the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer was posted, so the
			 * original skb can be handed up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Errored frame or allocation failure: recycle
			 * the buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust the hardware checksum only when no
			 * TCP/UDP checksum error bit is set.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1843
1844 /* MSI ISR - The only difference between this and the INTx ISR
1845  * is that the MSI interrupt is always serviced.
1846  */
1847 static irqreturn_t
1848 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1849 {
1850         struct net_device *dev = dev_instance;
1851         struct bnx2 *bp = netdev_priv(dev);
1852
1853         prefetch(bp->status_blk);
1854         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1855                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1856                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1857
1858         /* Return here if interrupt is disabled. */
1859         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1860                 return IRQ_HANDLED;
1861
1862         netif_rx_schedule(dev);
1863
1864         return IRQ_HANDLED;
1865 }
1866
/* INTx interrupt handler (the line may be shared with other devices). */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	/* Not ours if the status index is unchanged AND the chip's
	 * INTA bit reads as set.  The register read is short-circuited
	 * away when the index already changed.  NOTE(review): the set
	 * INTA_VALUE bit presumably means "INTA deasserted" — confirm
	 * against the register spec.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until the poll completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1896
1897 static inline int
1898 bnx2_has_work(struct bnx2 *bp)
1899 {
1900         struct status_block *sblk = bp->status_blk;
1901
1902         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1903             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1904                 return 1;
1905
1906         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1907             bp->link_up)
1908                 return 1;
1909
1910         return 0;
1911 }
1912
/* NAPI poll routine: handle a pending link attention, then TX and RX
 * completions.  Returns 0 (and re-enables interrupts) when all work is
 * done, 1 to remain on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between the link attention bit and its ack copy
	 * means a link change event is pending.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than this device's quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Remember the status index we have seen before re-checking
	 * for work; a change after this point raises a new interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* For INTx, ack twice: first with MASK_INT still set,
		 * then without, to re-enable the interrupt.
		 * NOTE(review): presumably avoids a spurious INTx
		 * during the ack — confirm against chip errata.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
1974
1975 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
1976  * from set_multicast.
1977  */
/* Program the MAC receive mode and multicast filtering to match
 * dev->flags and the device's multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and VLAN-keep
	 * cleared; the bits are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no VLAN group is registered and ASF
	 * management firmware is not enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low byte of the little-endian CRC
			 * of the address: bits 7:5 select the register,
			 * bits 4:0 the bit within it.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the RX mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2048
2049 static void
2050 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2051         u32 rv2p_proc)
2052 {
2053         int i;
2054         u32 val;
2055
2056
2057         for (i = 0; i < rv2p_code_len; i += 8) {
2058                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2059                 rv2p_code++;
2060                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2061                 rv2p_code++;
2062
2063                 if (rv2p_proc == RV2P_PROC1) {
2064                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2065                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2066                 }
2067                 else {
2068                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2069                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2070                 }
2071         }
2072
2073         /* Reset the processor, un-stall is done later. */
2074         if (rv2p_proc == RV2P_PROC1) {
2075                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2076         }
2077         else {
2078                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2079         }
2080 }
2081
2082 static void
2083 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2084 {
2085         u32 offset;
2086         u32 val;
2087
2088         /* Halt the CPU. */
2089         val = REG_RD_IND(bp, cpu_reg->mode);
2090         val |= cpu_reg->mode_value_halt;
2091         REG_WR_IND(bp, cpu_reg->mode, val);
2092         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2093
2094         /* Load the Text area. */
2095         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2096         if (fw->text) {
2097                 int j;
2098
2099                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2100                         REG_WR_IND(bp, offset, fw->text[j]);
2101                 }
2102         }
2103
2104         /* Load the Data area. */
2105         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2106         if (fw->data) {
2107                 int j;
2108
2109                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2110                         REG_WR_IND(bp, offset, fw->data[j]);
2111                 }
2112         }
2113
2114         /* Load the SBSS area. */
2115         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2116         if (fw->sbss) {
2117                 int j;
2118
2119                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2120                         REG_WR_IND(bp, offset, fw->sbss[j]);
2121                 }
2122         }
2123
2124         /* Load the BSS area. */
2125         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2126         if (fw->bss) {
2127                 int j;
2128
2129                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2130                         REG_WR_IND(bp, offset, fw->bss[j]);
2131                 }
2132         }
2133
2134         /* Load the Read-Only area. */
2135         offset = cpu_reg->spad_base +
2136                 (fw->rodata_addr - cpu_reg->mips_view_base);
2137         if (fw->rodata) {
2138                 int j;
2139
2140                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2141                         REG_WR_IND(bp, offset, fw->rodata[j]);
2142                 }
2143         }
2144
2145         /* Clear the pre-fetch instruction. */
2146         REG_WR_IND(bp, cpu_reg->inst, 0);
2147         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2148
2149         /* Start the CPU. */
2150         val = REG_RD_IND(bp, cpu_reg->mode);
2151         val &= ~cpu_reg->mode_value_halt;
2152         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2153         REG_WR_IND(bp, cpu_reg->mode, val);
2154 }
2155
/* Load firmware into all on-chip processors: the two RV2P engines and
 * the RX, TX, TX patch-up and completion CPUs.  The same cpu_reg/fw
 * scratch structures are refilled for each CPU in turn.
 */
static void
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
	load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_RXP_b06FwText;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TXP_b06FwText;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TPAT_b06FwText;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_COM_b06FwText;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

}
2351
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; for D3hot with WOL enabled, the MAC is configured to
 * receive magic/broadcast/multicast wakeup frames first.  Returns 0 on
 * success or -EINVAL for an unsupported state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (-> D0) and write-1-clear
		 * the PME status bit.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WOL
			 * link, then restore the user settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Disable, program, then enable the sort filter
			 * for broadcast + multicast.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* 3 == D3hot in the PM_CTRL state field.  NOTE(review):
		 * 5706 A0/A1 only enter D3hot when WOL is on —
		 * presumably a chip-rev workaround; confirm vs errata.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2478
2479 static int
2480 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2481 {
2482         u32 val;
2483         int j;
2484
2485         /* Request access to the flash interface. */
2486         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2487         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2488                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2489                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2490                         break;
2491
2492                 udelay(5);
2493         }
2494
2495         if (j >= NVRAM_TIMEOUT_COUNT)
2496                 return -EBUSY;
2497
2498         return 0;
2499 }
2500
2501 static int
2502 bnx2_release_nvram_lock(struct bnx2 *bp)
2503 {
2504         int j;
2505         u32 val;
2506
2507         /* Relinquish nvram interface. */
2508         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2509
2510         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2511                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2512                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2513                         break;
2514
2515                 udelay(5);
2516         }
2517
2518         if (j >= NVRAM_TIMEOUT_COUNT)
2519                 return -EBUSY;
2520
2521         return 0;
2522 }
2523
2524
2525 static int
2526 bnx2_enable_nvram_write(struct bnx2 *bp)
2527 {
2528         u32 val;
2529
2530         val = REG_RD(bp, BNX2_MISC_CFG);
2531         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2532
2533         if (!bp->flash_info->buffered) {
2534                 int j;
2535
2536                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2537                 REG_WR(bp, BNX2_NVM_COMMAND,
2538                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2539
2540                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2541                         udelay(5);
2542
2543                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2544                         if (val & BNX2_NVM_COMMAND_DONE)
2545                                 break;
2546                 }
2547
2548                 if (j >= NVRAM_TIMEOUT_COUNT)
2549                         return -EBUSY;
2550         }
2551         return 0;
2552 }
2553
2554 static void
2555 bnx2_disable_nvram_write(struct bnx2 *bp)
2556 {
2557         u32 val;
2558
2559         val = REG_RD(bp, BNX2_MISC_CFG);
2560         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2561 }
2562
2563
2564 static void
2565 bnx2_enable_nvram_access(struct bnx2 *bp)
2566 {
2567         u32 val;
2568
2569         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2570         /* Enable both bits, even on read. */
2571         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2572                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2573 }
2574
2575 static void
2576 bnx2_disable_nvram_access(struct bnx2 *bp)
2577 {
2578         u32 val;
2579
2580         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2581         /* Disable both bits, even after read. */
2582         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2583                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2584                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2585 }
2586
2587 static int
2588 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2589 {
2590         u32 cmd;
2591         int j;
2592
2593         if (bp->flash_info->buffered)
2594                 /* Buffered flash, no erase needed */
2595                 return 0;
2596
2597         /* Build an erase command */
2598         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2599               BNX2_NVM_COMMAND_DOIT;
2600
2601         /* Need to clear DONE bit separately. */
2602         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2603
2604         /* Address of the NVRAM to read from. */
2605         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2606
2607         /* Issue an erase command. */
2608         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2609
2610         /* Wait for completion. */
2611         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2612                 u32 val;
2613
2614                 udelay(5);
2615
2616                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2617                 if (val & BNX2_NVM_COMMAND_DONE)
2618                         break;
2619         }
2620
2621         if (j >= NVRAM_TIMEOUT_COUNT)
2622                 return -EBUSY;
2623
2624         return 0;
2625 }
2626
2627 static int
2628 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2629 {
2630         u32 cmd;
2631         int j;
2632
2633         /* Build the command word. */
2634         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2635
2636         /* Calculate an offset of a buffered flash. */
2637         if (bp->flash_info->buffered) {
2638                 offset = ((offset / bp->flash_info->page_size) <<
2639                            bp->flash_info->page_bits) +
2640                           (offset % bp->flash_info->page_size);
2641         }
2642
2643         /* Need to clear DONE bit separately. */
2644         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2645
2646         /* Address of the NVRAM to read from. */
2647         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2648
2649         /* Issue a read command. */
2650         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2651
2652         /* Wait for completion. */
2653         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2654                 u32 val;
2655
2656                 udelay(5);
2657
2658                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2659                 if (val & BNX2_NVM_COMMAND_DONE) {
2660                         val = REG_RD(bp, BNX2_NVM_READ);
2661
2662                         val = be32_to_cpu(val);
2663                         memcpy(ret_val, &val, 4);
2664                         break;
2665                 }
2666         }
2667         if (j >= NVRAM_TIMEOUT_COUNT)
2668                 return -EBUSY;
2669
2670         return 0;
2671 }
2672
2673
2674 static int
2675 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2676 {
2677         u32 cmd, val32;
2678         int j;
2679
2680         /* Build the command word. */
2681         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2682
2683         /* Calculate an offset of a buffered flash. */
2684         if (bp->flash_info->buffered) {
2685                 offset = ((offset / bp->flash_info->page_size) <<
2686                           bp->flash_info->page_bits) +
2687                          (offset % bp->flash_info->page_size);
2688         }
2689
2690         /* Need to clear DONE bit separately. */
2691         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2692
2693         memcpy(&val32, val, 4);
2694         val32 = cpu_to_be32(val32);
2695
2696         /* Write the data. */
2697         REG_WR(bp, BNX2_NVM_WRITE, val32);
2698
2699         /* Address of the NVRAM to write to. */
2700         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2701
2702         /* Issue the write command. */
2703         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2704
2705         /* Wait for completion. */
2706         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2707                 udelay(5);
2708
2709                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2710                         break;
2711         }
2712         if (j >= NVRAM_TIMEOUT_COUNT)
2713                 return -EBUSY;
2714
2715         return 0;
2716 }
2717
/* Identify the flash/EEPROM part behind the NVRAM interface and
 * record its parameters in bp->flash_info and bp->flash_size.
 *
 * NVM_CFG1 bit 30 tells whether the interface has already been
 * reconfigured (by firmware/bootcode); if not, the matching
 * flash_table entry is programmed into the NVM config registers here.
 *
 * Returns 0 on success, -ENODEV if no flash_table entry matches, or
 * an error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Whichever branch ran: if its loop completed without a match,
	 * j == entry_count and the part is unknown. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised in shared memory; fall back
	 * to the flash_table entry's total size if it reads as zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2795
2796 static int
2797 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2798                 int buf_size)
2799 {
2800         int rc = 0;
2801         u32 cmd_flags, offset32, len32, extra;
2802
2803         if (buf_size == 0)
2804                 return 0;
2805
2806         /* Request access to the flash interface. */
2807         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2808                 return rc;
2809
2810         /* Enable access to flash interface */
2811         bnx2_enable_nvram_access(bp);
2812
2813         len32 = buf_size;
2814         offset32 = offset;
2815         extra = 0;
2816
2817         cmd_flags = 0;
2818
2819         if (offset32 & 3) {
2820                 u8 buf[4];
2821                 u32 pre_len;
2822
2823                 offset32 &= ~3;
2824                 pre_len = 4 - (offset & 3);
2825
2826                 if (pre_len >= len32) {
2827                         pre_len = len32;
2828                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2829                                     BNX2_NVM_COMMAND_LAST;
2830                 }
2831                 else {
2832                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2833                 }
2834
2835                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2836
2837                 if (rc)
2838                         return rc;
2839
2840                 memcpy(ret_buf, buf + (offset & 3), pre_len);
2841
2842                 offset32 += 4;
2843                 ret_buf += pre_len;
2844                 len32 -= pre_len;
2845         }
2846         if (len32 & 3) {
2847                 extra = 4 - (len32 & 3);
2848                 len32 = (len32 + 4) & ~3;
2849         }
2850
2851         if (len32 == 4) {
2852                 u8 buf[4];
2853
2854                 if (cmd_flags)
2855                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2856                 else
2857                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2858                                     BNX2_NVM_COMMAND_LAST;
2859
2860                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2861
2862                 memcpy(ret_buf, buf, 4 - extra);
2863         }
2864         else if (len32 > 0) {
2865                 u8 buf[4];
2866
2867                 /* Read the first word. */
2868                 if (cmd_flags)
2869                         cmd_flags = 0;
2870                 else
2871                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2872
2873                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2874
2875                 /* Advance to the next dword. */
2876                 offset32 += 4;
2877                 ret_buf += 4;
2878                 len32 -= 4;
2879
2880                 while (len32 > 4 && rc == 0) {
2881                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2882
2883                         /* Advance to the next dword. */
2884                         offset32 += 4;
2885                         ret_buf += 4;
2886                         len32 -= 4;
2887                 }
2888
2889                 if (rc)
2890                         return rc;
2891
2892                 cmd_flags = BNX2_NVM_COMMAND_LAST;
2893                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2894
2895                 memcpy(ret_buf, buf, 4 - extra);
2896         }
2897
2898         /* Disable access to flash interface */
2899         bnx2_disable_nvram_access(bp);
2900
2901         bnx2_release_nvram_lock(bp);
2902
2903         return rc;
2904 }
2905
2906 static int
2907 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2908                 int buf_size)
2909 {
2910         u32 written, offset32, len32;
2911         u8 *buf, start[4], end[4];
2912         int rc = 0;
2913         int align_start, align_end;
2914
2915         buf = data_buf;
2916         offset32 = offset;
2917         len32 = buf_size;
2918         align_start = align_end = 0;
2919
2920         if ((align_start = (offset32 & 3))) {
2921                 offset32 &= ~3;
2922                 len32 += align_start;
2923                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2924                         return rc;
2925         }
2926
2927         if (len32 & 3) {
2928                 if ((len32 > 4) || !align_start) {
2929                         align_end = 4 - (len32 & 3);
2930                         len32 += align_end;
2931                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2932                                 end, 4))) {
2933                                 return rc;
2934                         }
2935                 }
2936         }
2937
2938         if (align_start || align_end) {
2939                 buf = kmalloc(len32, GFP_KERNEL);
2940                 if (buf == 0)
2941                         return -ENOMEM;
2942                 if (align_start) {
2943                         memcpy(buf, start, 4);
2944                 }
2945                 if (align_end) {
2946                         memcpy(buf + len32 - 4, end, 4);
2947                 }
2948                 memcpy(buf + align_start, data_buf, buf_size);
2949         }
2950
2951         written = 0;
2952         while ((written < len32) && (rc == 0)) {
2953                 u32 page_start, page_end, data_start, data_end;
2954                 u32 addr, cmd_flags;
2955                 int i;
2956                 u8 flash_buffer[264];
2957
2958                 /* Find the page_start addr */
2959                 page_start = offset32 + written;
2960                 page_start -= (page_start % bp->flash_info->page_size);
2961                 /* Find the page_end addr */
2962                 page_end = page_start + bp->flash_info->page_size;
2963                 /* Find the data_start addr */
2964                 data_start = (written == 0) ? offset32 : page_start;
2965                 /* Find the data_end addr */
2966                 data_end = (page_end > offset32 + len32) ? 
2967                         (offset32 + len32) : page_end;
2968
2969                 /* Request access to the flash interface. */
2970                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2971                         goto nvram_write_end;
2972
2973                 /* Enable access to flash interface */
2974                 bnx2_enable_nvram_access(bp);
2975
2976                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2977                 if (bp->flash_info->buffered == 0) {
2978                         int j;
2979
2980                         /* Read the whole page into the buffer
2981                          * (non-buffer flash only) */
2982                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
2983                                 if (j == (bp->flash_info->page_size - 4)) {
2984                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
2985                                 }
2986                                 rc = bnx2_nvram_read_dword(bp,
2987                                         page_start + j, 
2988                                         &flash_buffer[j], 
2989                                         cmd_flags);
2990
2991                                 if (rc)
2992                                         goto nvram_write_end;
2993
2994                                 cmd_flags = 0;
2995                         }
2996                 }
2997
2998                 /* Enable writes to flash interface (unlock write-protect) */
2999                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3000                         goto nvram_write_end;
3001
3002                 /* Erase the page */
3003                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3004                         goto nvram_write_end;
3005
3006                 /* Re-enable the write again for the actual write */
3007                 bnx2_enable_nvram_write(bp);
3008
3009                 /* Loop to write back the buffer data from page_start to
3010                  * data_start */
3011                 i = 0;
3012                 if (bp->flash_info->buffered == 0) {
3013                         for (addr = page_start; addr < data_start;
3014                                 addr += 4, i += 4) {
3015                                 
3016                                 rc = bnx2_nvram_write_dword(bp, addr,
3017                                         &flash_buffer[i], cmd_flags);
3018
3019                                 if (rc != 0)
3020                                         goto nvram_write_end;
3021
3022                                 cmd_flags = 0;
3023                         }
3024                 }
3025
3026                 /* Loop to write the new data from data_start to data_end */
3027                 for (addr = data_start; addr < data_end; addr += 4, i++) {
3028                         if ((addr == page_end - 4) ||
3029                                 ((bp->flash_info->buffered) &&
3030                                  (addr == data_end - 4))) {
3031
3032                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3033                         }
3034                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3035                                 cmd_flags);
3036
3037                         if (rc != 0)
3038                                 goto nvram_write_end;
3039
3040                         cmd_flags = 0;
3041                         buf += 4;
3042                 }
3043
3044                 /* Loop to write back the buffer data from data_end
3045                  * to page_end */
3046                 if (bp->flash_info->buffered == 0) {
3047                         for (addr = data_end; addr < page_end;
3048                                 addr += 4, i += 4) {
3049                         
3050                                 if (addr == page_end-4) {
3051                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3052                                 }
3053                                 rc = bnx2_nvram_write_dword(bp, addr,
3054                                         &flash_buffer[i], cmd_flags);
3055
3056                                 if (rc != 0)
3057                                         goto nvram_write_end;
3058
3059                                 cmd_flags = 0;
3060                         }
3061                 }
3062
3063                 /* Disable writes to flash interface (lock write-protect) */
3064                 bnx2_disable_nvram_write(bp);
3065
3066                 /* Disable access to flash interface */
3067                 bnx2_disable_nvram_access(bp);
3068                 bnx2_release_nvram_lock(bp);
3069
3070                 /* Increment written */
3071                 written += data_end - data_start;
3072         }
3073
3074 nvram_write_end:
3075         if (align_start || align_end)
3076                 kfree(buf);
3077         return rc;
3078 }
3079
/* Perform a core soft reset of the chip and wait for both the
 * hardware and the bootcode firmware to come back up.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value handed to the firmware via
 *	bnx2_fw_sync() before (WAIT0) and after (WAIT1) the reset.
 *
 * Returns 0 on success, -EBUSY if the reset does not complete,
 * -ENODEV if the chip comes up in the wrong endian mode, or an error
 * from the post-reset firmware sync / rbuf fixup.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);	/* dummy read; value unused */
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* NOTE(review): extra settle time on 5706 A0/A1 only — presumably
	 * an early-silicon workaround; confirm against errata. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Reset request/busy still asserted after the poll loop: give up. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3158
/* Post-reset chip initialization: programs DMA byte/word swapping,
 * context memory, NVRAM discovery, MAC address, MTU, host-coalescing
 * (HC) parameters, and the status/statistics block DMA addresses, then
 * tells the firmware initialization is done and enables all blocks.
 *
 * Returns the result of the final bnx2_fw_sync() handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): magic DMA_CONFIG bit fields (20:21, 11) — meaning
	 * not visible here; confirm against the register spec. */
	val |= (0x2 << 20) | (1 << 11);

	/* Bit 23 is set only for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* Ping-pong DMA on 5706 (non-A0) in conventional PCI mode only. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 only: restrict TDMA to one DMA at a time —
	 * NOTE(review): presumably an A0 workaround; confirm. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the PCI-X relaxed-ordering enable bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context init can run. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	bnx2_init_cpus(bp);
	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Kernel-bypass block size in the MQ block. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* RV2P page size derived from the BD page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Host status block DMA address (64-bit, split low/high). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	/* Statistics block DMA address (64-bit, split low/high). */
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Coalescing parameters: each register packs the *_int variant
	 * into the high 16 bits and the normal value into the low 16. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 uses the plain stats mode; everything else also gets
	 * the RX/TX timer modes. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	/* Only the link-state attention bit generates attentions. */
	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Record whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the firmware the driver has finished initializing. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; read back, then let it settle. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for later use. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3318
3319
/* Initialize the TX ring: point the last BD back at the start of the
 * ring (chain BD), reset the driver's TX producer/consumer state, and
 * program the TX context (type, command type, BD chain base address).
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* The BD at index MAX_TX_DESC_CNT is the chain pointer; it holds
	 * the DMA address of the start of the ring. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
		
	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset software producer/consumer indices and byte sequence. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;
	
	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	/* NOTE(review): bits 16-23 set to 8 — field meaning not visible
	 * here; confirm against the context spec. */
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* Program the 64-bit TX BD chain base address into the context. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3350
/* Initialize the RX rings: size the rx buffers from the current MTU,
 * link the per-page BD rings into a single circular chain, program
 * the RX context, pre-fill the ring with rx skbs, and publish the
 * initial producer index/byte sequence to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod; 
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	/* Reset software producer/consumer indices and byte sequence. */
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;
		
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		/* All data BDs on this page share length and flags. */
		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last BD of each page chains to the next page; the
		 * final page chains back to page 0, closing the ring. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): bits 8-15 set to 2 — field meaning not visible
	 * here; confirm against the context spec. */
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Program the 64-bit RX BD chain base (page 0) into the context. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate rx skbs; if an allocation fails, stop early and
	 * run with a partially filled ring. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3410
3411 static void
3412 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3413 {
3414         u32 num_rings, max;
3415
3416         bp->rx_ring_size = size;
3417         num_rings = 1;
3418         while (size > MAX_RX_DESC_CNT) {
3419                 size -= MAX_RX_DESC_CNT;
3420                 num_rings++;
3421         }
3422         /* round to next power of 2 */
3423         max = MAX_RX_RINGS;
3424         while ((max & num_rings) == 0)
3425                 max >>= 1;
3426
3427         if (num_rings != max)
3428                 max <<= 1;
3429
3430         bp->rx_max_ring = max;
3431         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3432 }
3433
/* Release every skb still queued on the TX ring, unmapping its DMA
 * buffers first.  Used when tearing down or resetting the NIC.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	/* A packet occupies one BD for the linear head plus one BD per
	 * page fragment, so after freeing an skb we jump past all of
	 * its BDs at once (i += j + 1).
	 */
	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each fragment, held in the following BDs. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		/* _any variant: safe from both irq and process context. */
		dev_kfree_skb_any(skb);
		i += j + 1;
	}

}
3470
3471 static void
3472 bnx2_free_rx_skbs(struct bnx2 *bp)
3473 {
3474         int i;
3475
3476         if (bp->rx_buf_ring == NULL)
3477                 return;
3478
3479         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3480                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3481                 struct sk_buff *skb = rx_buf->skb;
3482
3483                 if (skb == NULL)
3484                         continue;
3485
3486                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3487                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3488
3489                 rx_buf->skb = NULL;
3490
3491                 dev_kfree_skb_any(skb);
3492         }
3493 }
3494
/* Release all TX and RX buffers held by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3501
/* Reset the chip and reinitialize it together with the TX and RX
 * rings.  Returns 0 on success or the bnx2_reset_chip() error.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Free buffers even when the reset failed so nothing leaks. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	bnx2_init_chip(bp);
	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
3517
3518 static int
3519 bnx2_init_nic(struct bnx2 *bp)
3520 {
3521         int rc;
3522
3523         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3524                 return rc;
3525
3526         bnx2_init_phy(bp);
3527         bnx2_set_link(bp);
3528         return 0;
3529 }
3530
/* Ethtool register self-test.  For each entry, writes all-zeros and
 * all-ones to the register and verifies that read/write bits
 * (rw_mask) take the written value while read-only bits (ro_mask)
 * keep their original value.  The original register contents are
 * restored in all cases.  Returns 0 on success, -ENODEV on the first
 * failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	/* Table of registers to probe; terminated by offset 0xffff. */
	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Remember the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* All-zeros probe: writable bits must read back 0,
		 * read-only bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* All-ones probe: writable bits must read back 1,
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3693
3694 static int
3695 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3696 {
3697         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3698                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3699         int i;
3700
3701         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3702                 u32 offset;
3703
3704                 for (offset = 0; offset < size; offset += 4) {
3705
3706                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3707
3708                         if (REG_RD_IND(bp, start + offset) !=
3709                                 test_pattern[i]) {
3710                                 return -ENODEV;
3711                         }
3712                 }
3713         }
3714         return 0;
3715 }
3716
3717 static int
3718 bnx2_test_memory(struct bnx2 *bp)
3719 {
3720         int ret = 0;
3721         int i;
3722         static const struct {
3723                 u32   offset;
3724                 u32   len;
3725         } mem_tbl[] = {
3726                 { 0x60000,  0x4000 },
3727                 { 0xa0000,  0x3000 },
3728                 { 0xe0000,  0x4000 },
3729                 { 0x120000, 0x4000 },
3730                 { 0x1a0000, 0x4000 },
3731                 { 0x160000, 0x4000 },
3732                 { 0xffffffff, 0    },
3733         };
3734
3735         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3736                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3737                         mem_tbl[i].len)) != 0) {
3738                         return ret;
3739                 }
3740         }
3741         
3742         return ret;
3743 }
3744
3745 #define BNX2_MAC_LOOPBACK       0
3746 #define BNX2_PHY_LOOPBACK       1
3747
/* Send one frame through the selected loopback path (MAC or PHY) and
 * verify it is received back intact.  Returns 0 on success, -EINVAL
 * for an unknown mode, -ENOMEM on allocation failure, and -ENODEV on
 * any verification failure.
 *
 * NOTE(review): manipulates the TX/RX rings directly, so it assumes
 * the NIC is quiesced (no concurrent interrupt/NAPI processing) --
 * confirm against callers.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	/* Select and enable the requested loopback path. */
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = 0;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a full-size test frame: our MAC as destination, zero
	 * filler, then an incrementing byte pattern as payload.
	 */
	pkt_size = 1514;
	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (without an interrupt) so we can
	 * snapshot the RX consumer index before transmitting.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the frame on a single TX BD. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the TX
	 * completion and the looped-back RX frame.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);

	/* The transmit must have completed... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip places an l2_fhdr before the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	/* Sync the buffer for CPU access before inspecting it. */
	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match, minus the 4-byte CRC appended on receive. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
3866
3867 #define BNX2_MAC_LOOPBACK_FAILED        1
3868 #define BNX2_PHY_LOOPBACK_FAILED        2
3869 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
3870                                          BNX2_PHY_LOOPBACK_FAILED)
3871
3872 static int
3873 bnx2_test_loopback(struct bnx2 *bp)
3874 {
3875         int rc = 0;
3876
3877         if (!netif_running(bp->dev))
3878                 return BNX2_LOOPBACK_FAILED;
3879
3880         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3881         spin_lock_bh(&bp->phy_lock);
3882         bnx2_init_phy(bp);
3883         spin_unlock_bh(&bp->phy_lock);
3884         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3885                 rc |= BNX2_MAC_LOOPBACK_FAILED;
3886         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3887                 rc |= BNX2_PHY_LOOPBACK_FAILED;
3888         return rc;
3889 }
3890
3891 #define NVRAM_SIZE 0x200
3892 #define CRC32_RESIDUAL 0xdebb20e3
3893
3894 static int
3895 bnx2_test_nvram(struct bnx2 *bp)
3896 {
3897         u32 buf[NVRAM_SIZE / 4];
3898         u8 *data = (u8 *) buf;
3899         int rc = 0;
3900         u32 magic, csum;
3901
3902         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3903                 goto test_nvram_done;
3904
3905         magic = be32_to_cpu(buf[0]);
3906         if (magic != 0x669955aa) {
3907                 rc = -ENODEV;
3908                 goto test_nvram_done;
3909         }
3910
3911         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3912                 goto test_nvram_done;
3913
3914         csum = ether_crc_le(0x100, data);
3915         if (csum != CRC32_RESIDUAL) {
3916                 rc = -ENODEV;
3917                 goto test_nvram_done;
3918         }
3919
3920         csum = ether_crc_le(0x100, data + 0x100);
3921         if (csum != CRC32_RESIDUAL) {
3922                 rc = -ENODEV;
3923         }
3924
3925 test_nvram_done:
3926         return rc;
3927 }
3928
3929 static int
3930 bnx2_test_link(struct bnx2 *bp)
3931 {
3932         u32 bmsr;
3933
3934         spin_lock_bh(&bp->phy_lock);
3935         bnx2_read_phy(bp, MII_BMSR, &bmsr);
3936         bnx2_read_phy(bp, MII_BMSR, &bmsr);
3937         spin_unlock_bh(&bp->phy_lock);
3938                 
3939         if (bmsr & BMSR_LSTATUS) {
3940                 return 0;
3941         }
3942         return -ENODEV;
3943 }
3944
3945 static int
3946 bnx2_test_intr(struct bnx2 *bp)
3947 {
3948         int i;
3949         u16 status_idx;
3950
3951         if (!netif_running(bp->dev))
3952                 return -ENODEV;
3953
3954         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3955
3956         /* This register is not touched during run-time. */
3957         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
3958         REG_RD(bp, BNX2_HC_COMMAND);
3959
3960         for (i = 0; i < 10; i++) {
3961                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3962                         status_idx) {
3963
3964                         break;
3965                 }
3966
3967                 msleep_interruptible(10);
3968         }
3969         if (i < 10)
3970                 return 0;
3971
3972         return -ENODEV;
3973 }
3974
/* Periodic driver timer: sends the heartbeat pulse to the bootcode
 * firmware and, on 5706 SerDes boards, runs the parallel-detect
 * workaround state machine.  Always reschedules itself while the
 * device is up.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep the timer alive) while interrupts
	 * are blocked, e.g. during a reset.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat so the firmware knows the driver is alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	/* 5706 SerDes parallel detect: if autoneg fails but a signal
	 * is present without autoneg CONFIGs, force 1000FD; undo the
	 * forcing once the partner starts autonegotiating again.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			/* Grace period after (re)starting autoneg. */
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				/* NOTE(review): 0x1c/0x17/0x15 are
				 * vendor-specific SerDes PHY registers;
				 * values per Broadcom convention.
				 */
				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					/* Partner is not autonegotiating:
					 * force 1000 Mbps full duplex.
					 */
					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			(bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			/* Link was forced; if the partner now sends
			 * CONFIGs, re-enable autonegotiation.
			 */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4053
4054 /* Called with rtnl_lock */
/* net_device open: power up the chip, allocate ring/status memory,
 * request the interrupt (MSI when usable, else shared INTx),
 * initialize the NIC, verify that MSI actually delivers interrupts
 * (falling back to INTx if not), and start the TX queue.
 * Returns 0 or a negative errno with everything unwound.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is avoided on 5706 A0/A1 and when disabled by the
	 * module parameter; otherwise try it and fall back to INTx.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind in reverse order: irq, MSI, buffers, memory. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Reinitialize the NIC from scratch for INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4149
/* Work-queue handler that performs a full NIC reset: quiesce the
 * netif, reinitialize the chip and rings, then restart.  Scheduled
 * from bnx2_tx_timeout().
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	/* Flag the reset in progress for code that checks it elsewhere
	 * in the driver.
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is raised here before restart;
	 * bnx2_netif_start() is expected to re-enable interrupt
	 * processing -- confirm against its implementation.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4167
/* net_device TX watchdog hook: defer the recovery reset to process
 * context via the work queue.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4176
4177 #ifdef BCM_VLAN
4178 /* Called with rtnl_lock */
/* Attach a VLAN group and reprogram the RX mode accordingly; the
 * netif is quiesced around the update.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4191
4192 /* Called with rtnl_lock */
/* Remove a VLAN id from the group and reprogram the RX mode; the
 * netif is quiesced around the update.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4206 #endif
4207
4208 /* Called with dev->xmit_lock.
4209  * hard_start_xmit is pseudo-lockless - a lock is only required when
4210  * the tx queue is full. This way, we get the benefit of lockless
4211  * operations most of the time without the complexities to handle
4212  * netif_stop_queue/wake_queue race conditions.
4213  */
4214 static int
4215 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4216 {
4217         struct bnx2 *bp = netdev_priv(dev);
4218         dma_addr_t mapping;
4219         struct tx_bd *txbd;
4220         struct sw_bd *tx_buf;
4221         u32 len, vlan_tag_flags, last_frag, mss;
4222         u16 prod, ring_prod;
4223         int i;
4224
4225         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4226                 netif_stop_queue(dev);
4227                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4228                         dev->name);
4229
4230                 return NETDEV_TX_BUSY;
4231         }
4232         len = skb_headlen(skb);
4233         prod = bp->tx_prod;
4234         ring_prod = TX_RING_IDX(prod);
4235
4236         vlan_tag_flags = 0;
4237         if (skb->ip_summed == CHECKSUM_HW) {
4238                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4239         }
4240
4241         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4242                 vlan_tag_flags |=
4243                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4244         }
4245 #ifdef BCM_TSO 
4246         if ((mss = skb_shinfo(skb)->tso_size) &&
4247                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4248                 u32 tcp_opt_len, ip_tcp_len;
4249
4250                 if (skb_header_cloned(skb) &&
4251                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4252                         dev_kfree_skb(skb);
4253                         return NETDEV_TX_OK;
4254                 }
4255
4256                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4257                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4258
4259                 tcp_opt_len = 0;
4260                 if (skb->h.th->doff > 5) {
4261                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4262                 }
4263                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4264
4265                 skb->nh.iph->check = 0;
4266                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4267                 skb->h.th->check =
4268                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4269                                             skb->nh.iph->daddr,
4270                                             0, IPPROTO_TCP, 0);
4271
4272                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4273                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4274                                 (tcp_opt_len >> 2)) << 8;
4275                 }
4276         }
4277         else
4278 #endif
4279         {
4280                 mss = 0;
4281         }
4282
4283         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4284         
4285         tx_buf = &bp->tx_buf_ring[ring_prod];
4286         tx_buf->skb = skb;
4287         pci_unmap_addr_set(tx_buf, mapping, mapping);
4288
4289         txbd = &bp->tx_desc_ring[ring_prod];
4290
4291         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4292         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4293         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4294         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4295
4296         last_frag = skb_shinfo(skb)->nr_frags;
4297
4298         for (i = 0; i < last_frag; i++) {
4299                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4300
4301                 prod = NEXT_TX_BD(prod);
4302                 ring_prod = TX_RING_IDX(prod);
4303                 txbd = &bp->tx_desc_ring[ring_prod];
4304
4305                 len = frag->size;
4306                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4307                         len, PCI_DMA_TODEVICE);
4308                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4309                                 mapping, mapping);
4310
4311                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4312                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4313                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4314                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4315
4316         }
4317         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4318
4319         prod = NEXT_TX_BD(prod);
4320         bp->tx_prod_bseq += skb->len;
4321
4322         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4323         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4324
4325         mmiowb();
4326
4327         bp->tx_prod = prod;
4328         dev->trans_start = jiffies;
4329
4330         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4331                 spin_lock(&bp->tx_lock);
4332                 netif_stop_queue(dev);
4333                 
4334                 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4335                         netif_wake_queue(dev);
4336                 spin_unlock(&bp->tx_lock);
4337         }
4338
4339         return NETDEV_TX_OK;
4340 }
4341
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        /* Quiesce NAPI/interrupt processing and the periodic timer
         * before tearing the device down.
         */
        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        /* Pick the firmware shutdown code matching the WoL config. */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        free_irq(bp->pdev->irq, dev);
        if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
        /* Rings and DMA memory are only released after the chip has been
         * reset and the IRQ freed, so nothing can touch them anymore.
         */
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
4377
/* Assemble a hardware counter from its _hi/_lo 32-bit halves (the
 * statistics block stores 64-bit counters as two words).  On 32-bit
 * kernels only the low word is used.  The whole expansion is
 * parenthesized so the macros compose safely inside larger
 * expressions (e.g. sums or multiplications).
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        ((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4390
/* ethtool/netdev: translate the chip's hardware statistics block into
 * the generic struct net_device_stats.  Returns the cached stats if the
 * statistics block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        /* 64-bit counters are read via GET_NET_STATS (hi/lo pair). */
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast = 
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions = 
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        /* "Overrsize" (sic) matches the field name in the statistics
         * block structure.
         */
        net_stats->rx_length_errors = 
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors = 
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors = 
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors = 
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier-sense counter is not reported on 5706 and 5708 A0;
         * presumably a counter erratum on those chips -- TODO confirm
         * against the chip errata sheet.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long) 
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        return net_stats;
}
4462
4463 /* All ethtool functions called with rtnl_lock */
4464
4465 static int
4466 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4467 {
4468         struct bnx2 *bp = netdev_priv(dev);
4469
4470         cmd->supported = SUPPORTED_Autoneg;
4471         if (bp->phy_flags & PHY_SERDES_FLAG) {
4472                 cmd->supported |= SUPPORTED_1000baseT_Full |
4473                         SUPPORTED_FIBRE;
4474
4475                 cmd->port = PORT_FIBRE;
4476         }
4477         else {
4478                 cmd->supported |= SUPPORTED_10baseT_Half |
4479                         SUPPORTED_10baseT_Full |
4480                         SUPPORTED_100baseT_Half |
4481                         SUPPORTED_100baseT_Full |
4482                         SUPPORTED_1000baseT_Full |
4483                         SUPPORTED_TP;
4484
4485                 cmd->port = PORT_TP;
4486         }
4487
4488         cmd->advertising = bp->advertising;
4489
4490         if (bp->autoneg & AUTONEG_SPEED) {
4491                 cmd->autoneg = AUTONEG_ENABLE;
4492         }
4493         else {
4494                 cmd->autoneg = AUTONEG_DISABLE;
4495         }
4496
4497         if (netif_carrier_ok(dev)) {
4498                 cmd->speed = bp->line_speed;
4499                 cmd->duplex = bp->duplex;
4500         }
4501         else {
4502                 cmd->speed = -1;
4503                 cmd->duplex = -1;
4504         }
4505
4506         cmd->transceiver = XCVR_INTERNAL;
4507         cmd->phy_address = bp->phy_addr;
4508
4509         return 0;
4510 }
4511   
/* ethtool: apply a new link configuration.  Validates the request
 * against the PHY type (SerDes vs copper) before committing anything,
 * then reprograms the PHY under phy_lock.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        /* Work on local copies so nothing in bp changes until all
         * validation has passed.
         */
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                /* NB: masks the caller's advertising field in place. */
                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only. */
                        if (bp->phy_flags & PHY_SERDES_FLAG)
                                return -EINVAL;

                        advertising = cmd->advertising;

                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
                        advertising = cmd->advertising;
                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
                        /* 1000-half is never supported. */
                        return -EINVAL;
                }
                else {
                        /* Anything else: advertise everything the PHY
                         * type can do.
                         */
                        if (bp->phy_flags & PHY_SERDES_FLAG) {
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        }
                        else {
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                        }
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex: SerDes only does 1000-full, and
                 * copper cannot be forced to 1000.
                 */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if ((cmd->speed != SPEED_1000) ||
                                (cmd->duplex != DUPLEX_FULL)) {
                                return -EINVAL;
                        }
                }
                else if (cmd->speed == SPEED_1000) {
                        return -EINVAL;
                }
                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Validation passed: commit and reprogram the PHY. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        spin_lock_bh(&bp->phy_lock);

        bnx2_setup_phy(bp);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4583
4584 static void
4585 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4586 {
4587         struct bnx2 *bp = netdev_priv(dev);
4588
4589         strcpy(info->driver, DRV_MODULE_NAME);
4590         strcpy(info->version, DRV_MODULE_VERSION);
4591         strcpy(info->bus_info, pci_name(bp->pdev));
4592         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4593         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4594         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4595         info->fw_version[1] = info->fw_version[3] = '.';
4596         info->fw_version[5] = 0;
4597 }
4598
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool: report the register dump buffer size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
4606
4607 static void
4608 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4609 {
4610         u32 *p = _p, i, offset;
4611         u8 *orig_p = _p;
4612         struct bnx2 *bp = netdev_priv(dev);
4613         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4614                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4615                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4616                                  0x1040, 0x1048, 0x1080, 0x10a4,
4617                                  0x1400, 0x1490, 0x1498, 0x14f0,
4618                                  0x1500, 0x155c, 0x1580, 0x15dc,
4619                                  0x1600, 0x1658, 0x1680, 0x16d8,
4620                                  0x1800, 0x1820, 0x1840, 0x1854,
4621                                  0x1880, 0x1894, 0x1900, 0x1984,
4622                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4623                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4624                                  0x2000, 0x2030, 0x23c0, 0x2400,
4625                                  0x2800, 0x2820, 0x2830, 0x2850,
4626                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4627                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4628                                  0x4080, 0x4090, 0x43c0, 0x4458,
4629                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4630                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4631                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4632                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4633                                  0x6800, 0x6848, 0x684c, 0x6860,
4634                                  0x6888, 0x6910, 0x8000 };
4635
4636         regs->version = 0;
4637
4638         memset(p, 0, BNX2_REGDUMP_LEN);
4639
4640         if (!netif_running(bp->dev))
4641                 return;
4642
4643         i = 0;
4644         offset = reg_boundaries[0];
4645         p += offset;
4646         while (offset < BNX2_REGDUMP_LEN) {
4647                 *p++ = REG_RD(bp, offset);
4648                 offset += 4;
4649                 if (offset == reg_boundaries[i + 1]) {
4650                         offset = reg_boundaries[i + 2];
4651                         p = (u32 *) (orig_p + offset);
4652                         i += 2;
4653                 }
4654         }
4655 }
4656
4657 static void
4658 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4659 {
4660         struct bnx2 *bp = netdev_priv(dev);
4661
4662         if (bp->flags & NO_WOL_FLAG) {
4663                 wol->supported = 0;
4664                 wol->wolopts = 0;
4665         }
4666         else {
4667                 wol->supported = WAKE_MAGIC;
4668                 if (bp->wol)
4669                         wol->wolopts = WAKE_MAGIC;
4670                 else
4671                         wol->wolopts = 0;
4672         }
4673         memset(&wol->sopass, 0, sizeof(wol->sopass));
4674 }
4675
4676 static int
4677 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4678 {
4679         struct bnx2 *bp = netdev_priv(dev);
4680
4681         if (wol->wolopts & ~WAKE_MAGIC)
4682                 return -EINVAL;
4683
4684         if (wol->wolopts & WAKE_MAGIC) {
4685                 if (bp->flags & NO_WOL_FLAG)
4686                         return -EINVAL;
4687
4688                 bp->wol = 1;
4689         }
4690         else {
4691                 bp->wol = 0;
4692         }
4693         return 0;
4694 }
4695
/* ethtool: restart autonegotiation.  Only valid when autoneg is
 * enabled.  For SerDes PHYs the link is first forced down (loopback)
 * so the peer sees a link transition.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; msleep() cannot be
                 * called with a spinlock held.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);
                if (CHIP_NUM(bp) == CHIP_NUM_5706) {
                        /* Arm the timer-driven SerDes autoneg fallback. */
                        bp->current_interval = SERDES_AN_TIMEOUT;
                        bp->serdes_an_pending = 1;
                        mod_timer(&bp->timer, jiffies + bp->current_interval);
                }
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4731
4732 static int
4733 bnx2_get_eeprom_len(struct net_device *dev)
4734 {
4735         struct bnx2 *bp = netdev_priv(dev);
4736
4737         if (bp->flash_info == NULL)
4738                 return 0;
4739
4740         return (int) bp->flash_size;
4741 }
4742
4743 static int
4744 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4745                 u8 *eebuf)
4746 {
4747         struct bnx2 *bp = netdev_priv(dev);
4748         int rc;
4749
4750         /* parameters already validated in ethtool_get_eeprom */
4751
4752         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4753
4754         return rc;
4755 }
4756
4757 static int
4758 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4759                 u8 *eebuf)
4760 {
4761         struct bnx2 *bp = netdev_priv(dev);
4762         int rc;
4763
4764         /* parameters already validated in ethtool_set_eeprom */
4765
4766         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4767
4768         return rc;
4769 }
4770
/* ethtool: report the current interrupt coalescing parameters. */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Zero first so fields this driver does not support read as 0. */
        memset(coal, 0, sizeof(struct ethtool_coalesce));

        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
4792
/* ethtool: set interrupt coalescing parameters.  Each value is clamped
 * to the limits used below (0x3ff for tick values, 0xff for frame
 * counts, stats ticks capped and rounded down to a multiple of 0x100),
 * then the NIC is re-initialized to apply them if it is running.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; 
        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
        if (bp->rx_quick_cons_trip_int > 0xff)
                bp->rx_quick_cons_trip_int = 0xff;

        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
                0xff;

        bp->stats_ticks = coal->stats_block_coalesce_usecs;
        if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
        bp->stats_ticks &= 0xffff00;

        /* Restart the NIC so the new values are programmed into HW. */
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }

        return 0;
}
4836
4837 static void
4838 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4839 {
4840         struct bnx2 *bp = netdev_priv(dev);
4841
4842         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4843         ering->rx_mini_max_pending = 0;
4844         ering->rx_jumbo_max_pending = 0;
4845
4846         ering->rx_pending = bp->rx_ring_size;
4847         ering->rx_mini_pending = 0;
4848         ering->rx_jumbo_pending = 0;
4849
4850         ering->tx_max_pending = MAX_TX_DESC_CNT;
4851         ering->tx_pending = bp->tx_ring_size;
4852 }
4853
/* ethtool: resize the RX/TX rings.  TX must hold more than
 * MAX_SKB_FRAGS descriptors so a maximally-fragmented skb always fits.
 * A running NIC is torn down, resized, and brought back up.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct bnx2 *bp = netdev_priv(dev);

        if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
                (ering->tx_pending > MAX_TX_DESC_CNT) ||
                (ering->tx_pending <= MAX_SKB_FRAGS)) {

                return -EINVAL;
        }
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, ering->rx_pending);
        bp->tx_ring_size = ering->tx_pending;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (rc)
                        /* NOTE(review): on allocation failure the rings
                         * have already been freed, so the interface is
                         * left non-functional -- confirm intended.
                         */
                        return rc;
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }

        return 0;
}
4887
4888 static void
4889 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4890 {
4891         struct bnx2 *bp = netdev_priv(dev);
4892
4893         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4894         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4895         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4896 }
4897
4898 static int
4899 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4900 {
4901         struct bnx2 *bp = netdev_priv(dev);
4902
4903         bp->req_flow_ctrl = 0;
4904         if (epause->rx_pause)
4905                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4906         if (epause->tx_pause)
4907                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4908
4909         if (epause->autoneg) {
4910                 bp->autoneg |= AUTONEG_FLOW_CTRL;
4911         }
4912         else {
4913                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4914         }
4915
4916         spin_lock_bh(&bp->phy_lock);
4917
4918         bnx2_setup_phy(bp);
4919
4920         spin_unlock_bh(&bp->phy_lock);
4921
4922         return 0;
4923 }
4924
4925 static u32
4926 bnx2_get_rx_csum(struct net_device *dev)
4927 {
4928         struct bnx2 *bp = netdev_priv(dev);
4929
4930         return bp->rx_csum;
4931 }
4932
4933 static int
4934 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4935 {
4936         struct bnx2 *bp = netdev_priv(dev);
4937
4938         bp->rx_csum = data;
4939         return 0;
4940 }
4941
#define BNX2_NUM_STATS 45

/* Names reported by ethtool -S.  Indexed in step with
 * bnx2_stats_offset_arr and the per-chip length arrays below, so the
 * ordering of all of these tables must stay in sync.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
};
4993
/* Byte offset of a statistics_block field expressed as a 32-bit word
 * index into the block.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each ethtool statistic in the statistics block;
 * parallel to bnx2_stats_str_arr above.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
};
5043
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Per-statistic counter width in bytes (8 = 64-bit hi/lo pair,
 * 4 = 32-bit, 0 = skipped); parallel to bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,
};

/* 5708 reports carrier-sense errors; only IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,
};
5062
#define BNX2_NUM_TESTS 6

/* Names reported by ethtool -t; indices match the buf[] slots filled
 * in bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
5075
/* ethtool: number of self-test results returned by bnx2_self_test(). */
static int
bnx2_self_test_count(struct net_device *dev)
{
        return BNX2_NUM_TESTS;
}
5081
/* ethtool: run the self tests.  Offline tests (register, memory,
 * loopback) require taking the device down and resetting the chip;
 * online tests (nvram, interrupt, link) run on the live device.
 * buf[i] is nonzero when test i failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                /* Take the device offline and put the chip in diag mode. */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore normal operation (or just reset if the
                 * interface was closed while we were testing).
                 */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp);
                        bnx2_netif_start(bp);
                }

                /* wait for link up */
                msleep_interruptible(3000);
                if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
                        msleep_interruptible(4000);
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
5133
5134 static void
5135 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5136 {
5137         switch (stringset) {
5138         case ETH_SS_STATS:
5139                 memcpy(buf, bnx2_stats_str_arr,
5140                         sizeof(bnx2_stats_str_arr));
5141                 break;
5142         case ETH_SS_TEST:
5143                 memcpy(buf, bnx2_tests_str_arr,
5144                         sizeof(bnx2_tests_str_arr));
5145                 break;
5146         }
5147 }
5148
/* ethtool get_stats_count hook: number of counters bnx2_get_ethtool_stats()
 * reports.
 */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5154
/* ethtool get_ethtool_stats hook: copy BNX2_NUM_STATS counters out of the
 * DMA'ed hardware statistics block into buf[], widening each to u64.
 * A per-chip width table says whether each counter is 4 or 8 bytes wide
 * (0 = skipped on that chip because of errata).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block yet (device never brought up): report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions use the table with errata-skipped entries. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: high u32 first, then low u32 */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5195
/* ethtool phys_id hook: blink the port LED so an operator can identify
 * the adapter.  @data is the duration in seconds (0 selects a 2-second
 * default).  The LED is toggled every 500 ms; a pending signal stops the
 * loop early.  The original MISC_CFG LED mode is restored on exit.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Two register writes per second of blink time. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			/* Override asserted with no indicator bits set —
			 * presumably the LED-off phase.
			 */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			/* Force all speed and traffic indicators on. */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5229
/* ethtool entry points; generic ethtool_op_* helpers are used wherever no
 * chip-specific handling is required.
 */
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings           = bnx2_get_settings,
	.set_settings           = bnx2_set_settings,
	.get_drvinfo            = bnx2_get_drvinfo,
	.get_regs_len           = bnx2_get_regs_len,
	.get_regs               = bnx2_get_regs,
	.get_wol                = bnx2_get_wol,
	.set_wol                = bnx2_set_wol,
	.nway_reset             = bnx2_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = bnx2_get_eeprom_len,
	.get_eeprom             = bnx2_get_eeprom,
	.set_eeprom             = bnx2_set_eeprom,
	.get_coalesce           = bnx2_get_coalesce,
	.set_coalesce           = bnx2_set_coalesce,
	.get_ringparam          = bnx2_get_ringparam,
	.set_ringparam          = bnx2_set_ringparam,
	.get_pauseparam         = bnx2_get_pauseparam,
	.set_pauseparam         = bnx2_set_pauseparam,
	.get_rx_csum            = bnx2_get_rx_csum,
	.set_rx_csum            = bnx2_set_rx_csum,
	.get_tx_csum            = ethtool_op_get_tx_csum,
	.set_tx_csum            = ethtool_op_set_tx_csum,
	.get_sg                 = ethtool_op_get_sg,
	.set_sg                 = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso                = ethtool_op_get_tso,
	.set_tso                = ethtool_op_set_tso,
#endif
	.self_test_count        = bnx2_self_test_count,
	.self_test              = bnx2_self_test,
	.get_strings            = bnx2_get_strings,
	.phys_id                = bnx2_phys_id,
	.get_stats_count        = bnx2_get_stats_count,
	.get_ethtool_stats      = bnx2_get_ethtool_stats,
	.get_perm_addr          = ethtool_op_get_perm_addr,
};
5267
5268 /* Called with rtnl_lock */
5269 static int
5270 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5271 {
5272         struct mii_ioctl_data *data = if_mii(ifr);
5273         struct bnx2 *bp = netdev_priv(dev);
5274         int err;
5275
5276         switch(cmd) {
5277         case SIOCGMIIPHY:
5278                 data->phy_id = bp->phy_addr;
5279
5280                 /* fallthru */
5281         case SIOCGMIIREG: {
5282                 u32 mii_regval;
5283
5284                 spin_lock_bh(&bp->phy_lock);
5285                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5286                 spin_unlock_bh(&bp->phy_lock);
5287
5288                 data->val_out = mii_regval;
5289
5290                 return err;
5291         }
5292
5293         case SIOCSMIIREG:
5294                 if (!capable(CAP_NET_ADMIN))
5295                         return -EPERM;
5296
5297                 spin_lock_bh(&bp->phy_lock);
5298                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5299                 spin_unlock_bh(&bp->phy_lock);
5300
5301                 return err;
5302
5303         default:
5304                 /* do nothing */
5305                 break;
5306         }
5307         return -EOPNOTSUPP;
5308 }
5309
5310 /* Called with rtnl_lock */
/* Set a new station MAC address.  The address is validated, stored in the
 * netdev, and — if the interface is up — programmed into the chip right
 * away; otherwise the next open picks it up.
 */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
5326
5327 /* Called with rtnl_lock */
5328 static int
5329 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5330 {
5331         struct bnx2 *bp = netdev_priv(dev);
5332
5333         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5334                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5335                 return -EINVAL;
5336
5337         dev->mtu = new_mtu;
5338         if (netif_running(dev)) {
5339                 bnx2_netif_stop(bp);
5340
5341                 bnx2_init_nic(bp);
5342
5343                 bnx2_netif_start(bp);
5344         }
5345         return 0;
5346 }
5347
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: invoke the interrupt handler directly with the device's
 * IRQ disabled, so polled contexts (e.g. netconsole) can drive the NIC
 * without a real interrupt.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
5359
5360 static int __devinit
5361 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5362 {
5363         struct bnx2 *bp;
5364         unsigned long mem_len;
5365         int rc;
5366         u32 reg;
5367
5368         SET_MODULE_OWNER(dev);
5369         SET_NETDEV_DEV(dev, &pdev->dev);
5370         bp = netdev_priv(dev);
5371
5372         bp->flags = 0;
5373         bp->phy_flags = 0;
5374
5375         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5376         rc = pci_enable_device(pdev);
5377         if (rc) {
5378                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5379                 goto err_out;
5380         }
5381
5382         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5383                 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5384                        "aborting.\n");
5385                 rc = -ENODEV;
5386                 goto err_out_disable;
5387         }
5388
5389         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5390         if (rc) {
5391                 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5392                 goto err_out_disable;
5393         }
5394
5395         pci_set_master(pdev);
5396
5397         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5398         if (bp->pm_cap == 0) {
5399                 printk(KERN_ERR PFX "Cannot find power management capability, "
5400                                "aborting.\n");
5401                 rc = -EIO;
5402                 goto err_out_release;
5403         }
5404
5405         bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5406         if (bp->pcix_cap == 0) {
5407                 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5408                 rc = -EIO;
5409                 goto err_out_release;
5410         }
5411
5412         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5413                 bp->flags |= USING_DAC_FLAG;
5414                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5415                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5416                                "failed, aborting.\n");
5417                         rc = -EIO;
5418                         goto err_out_release;
5419                 }
5420         }
5421         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5422                 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5423                 rc = -EIO;
5424                 goto err_out_release;
5425         }
5426
5427         bp->dev = dev;
5428         bp->pdev = pdev;
5429
5430         spin_lock_init(&bp->phy_lock);
5431         spin_lock_init(&bp->tx_lock);
5432         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5433
5434         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5435         mem_len = MB_GET_CID_ADDR(17);
5436         dev->mem_end = dev->mem_start + mem_len;
5437         dev->irq = pdev->irq;
5438
5439         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5440
5441         if (!bp->regview) {
5442                 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5443                 rc = -ENOMEM;
5444                 goto err_out_release;
5445         }
5446
5447         /* Configure byte swap and enable write to the reg_window registers.
5448          * Rely on CPU to do target byte swapping on big endian systems
5449          * The chip's target access swapping will not swap all accesses
5450          */
5451         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5452                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5453                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5454
5455         bnx2_set_power_state(bp, PCI_D0);
5456
5457         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5458
5459         /* Get bus information. */
5460         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5461         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5462                 u32 clkreg;
5463
5464                 bp->flags |= PCIX_FLAG;
5465
5466                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5467                 
5468                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5469                 switch (clkreg) {
5470                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5471                         bp->bus_speed_mhz = 133;
5472                         break;
5473
5474                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5475                         bp->bus_speed_mhz = 100;
5476                         break;
5477
5478                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5479                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5480                         bp->bus_speed_mhz = 66;
5481                         break;
5482
5483                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5484                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5485                         bp->bus_speed_mhz = 50;
5486                         break;
5487
5488                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5489                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5490                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5491                         bp->bus_speed_mhz = 33;
5492                         break;
5493                 }
5494         }
5495         else {
5496                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5497                         bp->bus_speed_mhz = 66;
5498                 else
5499                         bp->bus_speed_mhz = 33;
5500         }
5501
5502         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5503                 bp->flags |= PCI_32BIT_FLAG;
5504
5505         /* 5706A0 may falsely detect SERR and PERR. */
5506         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5507                 reg = REG_RD(bp, PCI_COMMAND);
5508                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5509                 REG_WR(bp, PCI_COMMAND, reg);
5510         }
5511         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5512                 !(bp->flags & PCIX_FLAG)) {
5513
5514                 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5515                        "aborting.\n");
5516                 goto err_out_unmap;
5517         }
5518
5519         bnx2_init_nvram(bp);
5520
5521         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5522
5523         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5524             BNX2_SHM_HDR_SIGNATURE_SIG)
5525                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5526         else
5527                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5528
5529         /* Get the permanent MAC address.  First we need to make sure the
5530          * firmware is actually running.
5531          */
5532         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5533
5534         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5535             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5536                 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5537                 rc = -ENODEV;
5538                 goto err_out_unmap;
5539         }
5540
5541         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5542
5543         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5544         bp->mac_addr[0] = (u8) (reg >> 8);
5545         bp->mac_addr[1] = (u8) reg;
5546
5547         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5548         bp->mac_addr[2] = (u8) (reg >> 24);
5549         bp->mac_addr[3] = (u8) (reg >> 16);
5550         bp->mac_addr[4] = (u8) (reg >> 8);
5551         bp->mac_addr[5] = (u8) reg;
5552
5553         bp->tx_ring_size = MAX_TX_DESC_CNT;
5554         bnx2_set_rx_ring_size(bp, 100);
5555
5556         bp->rx_csum = 1;
5557
5558         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5559
5560         bp->tx_quick_cons_trip_int = 20;
5561         bp->tx_quick_cons_trip = 20;
5562         bp->tx_ticks_int = 80;
5563         bp->tx_ticks = 80;
5564                 
5565         bp->rx_quick_cons_trip_int = 6;
5566         bp->rx_quick_cons_trip = 6;
5567         bp->rx_ticks_int = 18;
5568         bp->rx_ticks = 18;
5569
5570         bp->stats_ticks = 1000000 & 0xffff00;
5571
5572         bp->timer_interval =  HZ;
5573         bp->current_interval =  HZ;
5574
5575         bp->phy_addr = 1;
5576
5577         /* Disable WOL support if we are running on a SERDES chip. */
5578         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5579                 bp->phy_flags |= PHY_SERDES_FLAG;
5580                 bp->flags |= NO_WOL_FLAG;
5581                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5582                         bp->phy_addr = 2;
5583                         reg = REG_RD_IND(bp, bp->shmem_base +
5584                                          BNX2_SHARED_HW_CFG_CONFIG);
5585                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5586                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5587                 }
5588         }
5589
5590         if (CHIP_NUM(bp) == CHIP_NUM_5708)
5591                 bp->flags |= NO_WOL_FLAG;
5592
5593         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5594                 bp->tx_quick_cons_trip_int =
5595                         bp->tx_quick_cons_trip;
5596                 bp->tx_ticks_int = bp->tx_ticks;
5597                 bp->rx_quick_cons_trip_int =
5598                         bp->rx_quick_cons_trip;
5599                 bp->rx_ticks_int = bp->rx_ticks;
5600                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5601                 bp->com_ticks_int = bp->com_ticks;
5602                 bp->cmd_ticks_int = bp->cmd_ticks;
5603         }
5604
5605         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5606         bp->req_line_speed = 0;
5607         if (bp->phy_flags & PHY_SERDES_FLAG) {
5608                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5609
5610                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5611                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5612                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5613                         bp->autoneg = 0;
5614                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5615                         bp->req_duplex = DUPLEX_FULL;
5616                 }
5617         }
5618         else {
5619                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5620         }
5621
5622         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5623
5624         init_timer(&bp->timer);
5625         bp->timer.expires = RUN_AT(bp->timer_interval);
5626         bp->timer.data = (unsigned long) bp;
5627         bp->timer.function = bnx2_timer;
5628
5629         return 0;
5630
5631 err_out_unmap:
5632         if (bp->regview) {
5633                 iounmap(bp->regview);
5634                 bp->regview = NULL;
5635         }
5636
5637 err_out_release:
5638         pci_release_regions(pdev);
5639
5640 err_out_disable:
5641         pci_disable_device(pdev);
5642         pci_set_drvdata(pdev, NULL);
5643
5644 err_out:
5645         return rc;
5646 }
5647
5648 static int __devinit
5649 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5650 {
5651         static int version_printed = 0;
5652         struct net_device *dev = NULL;
5653         struct bnx2 *bp;
5654         int rc, i;
5655
5656         if (version_printed++ == 0)
5657                 printk(KERN_INFO "%s", version);
5658
5659         /* dev zeroed in init_etherdev */
5660         dev = alloc_etherdev(sizeof(*bp));
5661
5662         if (!dev)
5663                 return -ENOMEM;
5664
5665         rc = bnx2_init_board(pdev, dev);
5666         if (rc < 0) {
5667                 free_netdev(dev);
5668                 return rc;
5669         }
5670
5671         dev->open = bnx2_open;
5672         dev->hard_start_xmit = bnx2_start_xmit;
5673         dev->stop = bnx2_close;
5674         dev->get_stats = bnx2_get_stats;
5675         dev->set_multicast_list = bnx2_set_rx_mode;
5676         dev->do_ioctl = bnx2_ioctl;
5677         dev->set_mac_address = bnx2_change_mac_addr;
5678         dev->change_mtu = bnx2_change_mtu;
5679         dev->tx_timeout = bnx2_tx_timeout;
5680         dev->watchdog_timeo = TX_TIMEOUT;
5681 #ifdef BCM_VLAN
5682         dev->vlan_rx_register = bnx2_vlan_rx_register;
5683         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5684 #endif
5685         dev->poll = bnx2_poll;
5686         dev->ethtool_ops = &bnx2_ethtool_ops;
5687         dev->weight = 64;
5688
5689         bp = netdev_priv(dev);
5690
5691 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5692         dev->poll_controller = poll_bnx2;
5693 #endif
5694
5695         if ((rc = register_netdev(dev))) {
5696                 printk(KERN_ERR PFX "Cannot register net device\n");
5697                 if (bp->regview)
5698                         iounmap(bp->regview);
5699                 pci_release_regions(pdev);
5700                 pci_disable_device(pdev);
5701                 pci_set_drvdata(pdev, NULL);
5702                 free_netdev(dev);
5703                 return rc;
5704         }
5705
5706         pci_set_drvdata(pdev, dev);
5707
5708         memcpy(dev->dev_addr, bp->mac_addr, 6);
5709         memcpy(dev->perm_addr, bp->mac_addr, 6);
5710         bp->name = board_info[ent->driver_data].name,
5711         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5712                 "IRQ %d, ",
5713                 dev->name,
5714                 bp->name,
5715                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5716                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5717                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5718                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5719                 bp->bus_speed_mhz,
5720                 dev->base_addr,
5721                 bp->pdev->irq);
5722
5723         printk("node addr ");
5724         for (i = 0; i < 6; i++)
5725                 printk("%2.2x", dev->dev_addr[i]);
5726         printk("\n");
5727
5728         dev->features |= NETIF_F_SG;
5729         if (bp->flags & USING_DAC_FLAG)
5730                 dev->features |= NETIF_F_HIGHDMA;
5731         dev->features |= NETIF_F_IP_CSUM;
5732 #ifdef BCM_VLAN
5733         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5734 #endif
5735 #ifdef BCM_TSO
5736         dev->features |= NETIF_F_TSO;
5737 #endif
5738
5739         netif_carrier_off(bp->dev);
5740
5741         return 0;
5742 }
5743
/* PCI remove handler: tear down in reverse order of bnx2_init_one().
 * flush_scheduled_work() runs first so a queued reset_task cannot touch
 * the device while it is being destroyed.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
5762
/* PCI suspend handler.  Quiesces a running interface, tells the bootcode
 * why we are going down (the reset code depends on WOL capability and
 * configuration), frees all buffers, and enters the requested PCI power
 * state.  A no-op when the interface is down.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	/* Make sure a queued reset_task cannot run during suspend. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
5788
5789 static int
5790 bnx2_resume(struct pci_dev *pdev)
5791 {
5792         struct net_device *dev = pci_get_drvdata(pdev);
5793         struct bnx2 *bp = netdev_priv(dev);
5794
5795         if (!netif_running(dev))
5796                 return 0;
5797
5798         bnx2_set_power_state(bp, PCI_D0);
5799         netif_device_attach(dev);
5800         bnx2_init_nic(bp);
5801         bnx2_netif_start(bp);
5802         return 0;
5803 }
5804
/* PCI driver glue: probe/remove and power-management entry points. */
static struct pci_driver bnx2_pci_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = bnx2_pci_tbl,
	.probe          = bnx2_init_one,
	.remove         = __devexit_p(bnx2_remove_one),
	.suspend        = bnx2_suspend,
	.resume         = bnx2_resume,
};
5813
/* Module entry point: register the PCI driver. */
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}
5818
/* Module exit point: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
5823
5824 module_init(bnx2_init);
5825 module_exit(bnx2_cleanup);
5826
5827
5828