/* linux-2.6.git: drivers/net/bnx2.c (merge via davem/net-2.6) */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #include "bnx2.h"
13 #include "bnx2_fw.h"
14
15 #define DRV_MODULE_NAME         "bnx2"
16 #define PFX DRV_MODULE_NAME     ": "
17 #define DRV_MODULE_VERSION      "1.4.30"
18 #define DRV_MODULE_RELDATE      "October 11, 2005"
19
20 #define RUN_AT(x) (jiffies + (x))
21
22 /* Time in jiffies before concluding the transmitter is hung. */
23 #define TX_TIMEOUT  (5*HZ)
24
25 static char version[] __devinitdata =
26         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
29 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION);
32
/* Module option: set disable_msi=1 to make the driver use legacy INTx
 * interrupts instead of MSI.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
37
/* Supported board types.  Used as an index into board_info[] below and
 * carried in the driver_data field of bnx2_pci_tbl[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
47
/* Human-readable adapter names, indexed by board_t, above.
 * The order must match the board_t enumeration exactly.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
60
/* PCI device IDs handled by this driver.  HP OEM boards are matched
 * first by their specific subsystem IDs; the PCI_ANY_ID entries that
 * follow catch the generic Broadcom parts.  The last field is the
 * board_t index used to look up the name in board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
78
/* NVRAM device table.  Each entry describes one supported flash/EEPROM
 * part: raw controller configuration words, buffered-mode flag, page
 * geometry, total size, and a printable name (see struct flash_spec in
 * bnx2.h for the exact field layout).
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
165
166 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
167
/* Return the number of tx descriptors still available to the driver. */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	/* Ring-masked distance between producer and consumer. */
	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);

	/* If the unsigned subtraction wrapped (masked consumer ahead of
	 * masked producer), re-mask the result; the extra -1 presumably
	 * accounts for the unusable end-of-page descriptor -- TODO
	 * confirm against the TX_RING_IDX definition in bnx2.h. */
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
176
177 static u32
178 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
179 {
180         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
181         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
182 }
183
/* Indirectly write a device register: latch @offset into the PCI
 * config window address register, then write @val through the window.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
190
191 static void
192 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
193 {
194         offset += cid_addr;
195         REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
196         REG_WR(bp, BNX2_CTX_DATA, val);
197 }
198
/* Read PHY register @reg over the MDIO interface, storing the result
 * in *val (0 on timeout).
 *
 * If the MDIO block is in hardware auto-poll mode, polling is switched
 * off around the manual transaction so the two cannot collide, and
 * restored on exit.
 *
 * Returns 0 on success or -EBUSY if the transaction did not complete
 * within the poll window (50 x 10us).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Pause auto-polling; the read-back presumably flushes
		 * the posted write before the settle delay. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Build and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read and keep only the data field.  This also
			 * clears START_BUSY in val1, which the timeout check
			 * below relies on. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Still busy after the poll window: timed out. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
255
/* Write @val to PHY register @reg over the MDIO interface.
 *
 * As in bnx2_read_phy(), hardware auto-polling is paused around the
 * manual MDIO transaction and restored afterwards.
 *
 * Returns 0 on success or -EBUSY if the transaction did not complete
 * within the poll window (50 x 10us).
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Pause auto-polling; the read-back presumably flushes
		 * the posted write before the settle delay. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Build and fire the MDIO write command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
304
/* Mask chip interrupts.  The read-back of INT_ACK_CMD presumably
 * flushes the posted write so the mask takes effect before returning.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
312
313 static void
314 bnx2_enable_int(struct bnx2 *bp)
315 {
316         u32 val;
317
318         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
319                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
320
321         val = REG_RD(bp, BNX2_HC_COMMAND);
322         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
323 }
324
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is raised first (it is dropped again by bnx2_netif_start();
 * presumably the interrupt path backs off while it is nonzero).
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
332
/* Quiesce the interface: synchronously disable interrupts, then stop
 * NAPI polling and the transmit queue.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		/* Touch the timestamp so the stopped queue does not
		 * trip the netdev tx watchdog. */
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
343
344 static void
345 bnx2_netif_start(struct bnx2 *bp)
346 {
347         if (atomic_dec_and_test(&bp->intr_sem)) {
348                 if (netif_running(bp->dev)) {
349                         netif_wake_queue(bp->dev);
350                         netif_poll_enable(bp->dev);
351                         bnx2_enable_int(bp);
352                 }
353         }
354 }
355
356 static void
357 bnx2_free_mem(struct bnx2 *bp)
358 {
359         if (bp->stats_blk) {
360                 pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
361                                     bp->stats_blk, bp->stats_blk_mapping);
362                 bp->stats_blk = NULL;
363         }
364         if (bp->status_blk) {
365                 pci_free_consistent(bp->pdev, sizeof(struct status_block),
366                                     bp->status_blk, bp->status_blk_mapping);
367                 bp->status_blk = NULL;
368         }
369         if (bp->tx_desc_ring) {
370                 pci_free_consistent(bp->pdev,
371                                     sizeof(struct tx_bd) * TX_DESC_CNT,
372                                     bp->tx_desc_ring, bp->tx_desc_mapping);
373                 bp->tx_desc_ring = NULL;
374         }
375         kfree(bp->tx_buf_ring);
376         bp->tx_buf_ring = NULL;
377         if (bp->rx_desc_ring) {
378                 pci_free_consistent(bp->pdev,
379                                     sizeof(struct rx_bd) * RX_DESC_CNT,
380                                     bp->rx_desc_ring, bp->rx_desc_mapping);
381                 bp->rx_desc_ring = NULL;
382         }
383         kfree(bp->rx_buf_ring);
384         bp->rx_buf_ring = NULL;
385 }
386
/* Allocate all driver memory: software shadow rings (kmalloc), tx/rx
 * descriptor rings, status block and statistics block (coherent DMA).
 * On any failure everything allocated so far is released through
 * bnx2_free_mem().
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				     GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
				     GFP_KERNEL);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
	bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct rx_bd) *
						RX_DESC_CNT,
						&bp->rx_desc_mapping);
	if (bp->rx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->status_blk = pci_alloc_consistent(bp->pdev,
					      sizeof(struct status_block),
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, sizeof(struct status_block));

	bp->stats_blk = pci_alloc_consistent(bp->pdev,
					     sizeof(struct statistics_block),
					     &bp->stats_blk_mapping);
	if (bp->stats_blk == NULL)
		goto alloc_mem_err;

	memset(bp->stats_blk, 0, sizeof(struct statistics_block));

	return 0;

alloc_mem_err:
	/* Unwind all partial allocations; bnx2_free_mem() checks each
	 * pointer individually. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
438
439 static void
440 bnx2_report_fw_link(struct bnx2 *bp)
441 {
442         u32 fw_link_status = 0;
443
444         if (bp->link_up) {
445                 u32 bmsr;
446
447                 switch (bp->line_speed) {
448                 case SPEED_10:
449                         if (bp->duplex == DUPLEX_HALF)
450                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
451                         else
452                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
453                         break;
454                 case SPEED_100:
455                         if (bp->duplex == DUPLEX_HALF)
456                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
457                         else
458                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
459                         break;
460                 case SPEED_1000:
461                         if (bp->duplex == DUPLEX_HALF)
462                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
463                         else
464                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
465                         break;
466                 case SPEED_2500:
467                         if (bp->duplex == DUPLEX_HALF)
468                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
469                         else
470                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
471                         break;
472                 }
473
474                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
475
476                 if (bp->autoneg) {
477                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
478
479                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
480                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
481
482                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
483                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
484                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
485                         else
486                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
487                 }
488         }
489         else
490                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
491
492         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
493 }
494
/* Log the new link state to the console, mirror it to the netdev
 * carrier state, and forward it to the firmware.  The sequence of
 * continuation printk()s builds a single log line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
529
/* Resolve the pause (flow control) configuration after a link change
 * and store the result in bp->flow_ctrl (FLOW_CTRL_TX / FLOW_CTRL_RX
 * bits).  Flow control is only ever enabled in full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If speed and flow control were not both autonegotiated, use
	 * the administratively requested setting. */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * directly in its 1000X status register. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	/* On SerDes, translate the 1000X pause bits into their copper
	 * equivalents so the resolution table below handles both. */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
605
606 static int
607 bnx2_5708s_linkup(struct bnx2 *bp)
608 {
609         u32 val;
610
611         bp->link_up = 1;
612         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
613         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
614                 case BCM5708S_1000X_STAT1_SPEED_10:
615                         bp->line_speed = SPEED_10;
616                         break;
617                 case BCM5708S_1000X_STAT1_SPEED_100:
618                         bp->line_speed = SPEED_100;
619                         break;
620                 case BCM5708S_1000X_STAT1_SPEED_1G:
621                         bp->line_speed = SPEED_1000;
622                         break;
623                 case BCM5708S_1000X_STAT1_SPEED_2G5:
624                         bp->line_speed = SPEED_2500;
625                         break;
626         }
627         if (val & BCM5708S_1000X_STAT1_FD)
628                 bp->duplex = DUPLEX_FULL;
629         else
630                 bp->duplex = DUPLEX_HALF;
631
632         return 0;
633 }
634
635 static int
636 bnx2_5706s_linkup(struct bnx2 *bp)
637 {
638         u32 bmcr, local_adv, remote_adv, common;
639
640         bp->link_up = 1;
641         bp->line_speed = SPEED_1000;
642
643         bnx2_read_phy(bp, MII_BMCR, &bmcr);
644         if (bmcr & BMCR_FULLDPLX) {
645                 bp->duplex = DUPLEX_FULL;
646         }
647         else {
648                 bp->duplex = DUPLEX_HALF;
649         }
650
651         if (!(bmcr & BMCR_ANENABLE)) {
652                 return 0;
653         }
654
655         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
656         bnx2_read_phy(bp, MII_LPA, &remote_adv);
657
658         common = local_adv & remote_adv;
659         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
660
661                 if (common & ADVERTISE_1000XFULL) {
662                         bp->duplex = DUPLEX_FULL;
663                 }
664                 else {
665                         bp->duplex = DUPLEX_HALF;
666                 }
667         }
668
669         return 0;
670 }
671
672 static int
673 bnx2_copper_linkup(struct bnx2 *bp)
674 {
675         u32 bmcr;
676
677         bnx2_read_phy(bp, MII_BMCR, &bmcr);
678         if (bmcr & BMCR_ANENABLE) {
679                 u32 local_adv, remote_adv, common;
680
681                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
682                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
683
684                 common = local_adv & (remote_adv >> 2);
685                 if (common & ADVERTISE_1000FULL) {
686                         bp->line_speed = SPEED_1000;
687                         bp->duplex = DUPLEX_FULL;
688                 }
689                 else if (common & ADVERTISE_1000HALF) {
690                         bp->line_speed = SPEED_1000;
691                         bp->duplex = DUPLEX_HALF;
692                 }
693                 else {
694                         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
695                         bnx2_read_phy(bp, MII_LPA, &remote_adv);
696
697                         common = local_adv & remote_adv;
698                         if (common & ADVERTISE_100FULL) {
699                                 bp->line_speed = SPEED_100;
700                                 bp->duplex = DUPLEX_FULL;
701                         }
702                         else if (common & ADVERTISE_100HALF) {
703                                 bp->line_speed = SPEED_100;
704                                 bp->duplex = DUPLEX_HALF;
705                         }
706                         else if (common & ADVERTISE_10FULL) {
707                                 bp->line_speed = SPEED_10;
708                                 bp->duplex = DUPLEX_FULL;
709                         }
710                         else if (common & ADVERTISE_10HALF) {
711                                 bp->line_speed = SPEED_10;
712                                 bp->duplex = DUPLEX_HALF;
713                         }
714                         else {
715                                 bp->line_speed = 0;
716                                 bp->link_up = 0;
717                         }
718                 }
719         }
720         else {
721                 if (bmcr & BMCR_SPEED100) {
722                         bp->line_speed = SPEED_100;
723                 }
724                 else {
725                         bp->line_speed = SPEED_10;
726                 }
727                 if (bmcr & BMCR_FULLDPLX) {
728                         bp->duplex = DUPLEX_FULL;
729                 }
730                 else {
731                         bp->duplex = DUPLEX_HALF;
732                 }
733         }
734
735         return 0;
736 }
737
/* Program the EMAC to match the currently resolved link parameters
 * (speed, duplex, flow control).  With link down the port mode falls
 * back to GMII.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX lengths register: 0x2620 is the normal value, 0x26ff is
	 * used for 1000 Mbps half duplex (vendor-supplied constants;
	 * field meanings not documented here). */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only the 5708 has a dedicated 10M mode;
				 * other chips use plain MII for 10/100. */
				if (CHIP_NUM(bp) == CHIP_NUM_5708) {
					val |= BNX2_EMAC_MODE_PORT_MII_10;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the link-change interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
804
/* Re-evaluate the link state from the PHY (or the EMAC status on the
 * 5706 SerDes), update bp->link_up, speed/duplex and flow control,
 * report changes, and reprogram the MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC loopback mode the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;	/* remember previous state */

	/* BMSR link status is latched-low; read twice to get the
	 * current value. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On the 5706 SerDes, override the BMSR link bit with the EMAC
	 * link status -- NOTE(review): reason inferred from this
	 * override alone; confirm against chip errata. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific speed/duplex decode. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: if autonegotiation is requested on a
		 * SerDes PHY, make sure it is re-enabled at the PHY. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only report on actual state transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
870
871 static int
872 bnx2_reset_phy(struct bnx2 *bp)
873 {
874         int i;
875         u32 reg;
876
877         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
878
879 #define PHY_RESET_MAX_WAIT 100
880         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
881                 udelay(10);
882
883                 bnx2_read_phy(bp, MII_BMCR, &reg);
884                 if (!(reg & BMCR_RESET)) {
885                         udelay(20);
886                         break;
887                 }
888         }
889         if (i == PHY_RESET_MAX_WAIT) {
890                 return -EBUSY;
891         }
892         return 0;
893 }
894
895 static u32
896 bnx2_phy_get_pause_adv(struct bnx2 *bp)
897 {
898         u32 adv = 0;
899
900         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
901                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
902
903                 if (bp->phy_flags & PHY_SERDES_FLAG) {
904                         adv = ADVERTISE_1000XPAUSE;
905                 }
906                 else {
907                         adv = ADVERTISE_PAUSE_CAP;
908                 }
909         }
910         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
911                 if (bp->phy_flags & PHY_SERDES_FLAG) {
912                         adv = ADVERTISE_1000XPSE_ASYM;
913                 }
914                 else {
915                         adv = ADVERTISE_PAUSE_ASYM;
916                 }
917         }
918         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
919                 if (bp->phy_flags & PHY_SERDES_FLAG) {
920                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
921                 }
922                 else {
923                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
924                 }
925         }
926         return adv;
927 }
928
/* Configure the SerDes PHY according to the autoneg/speed/duplex settings
 * requested in *bp.  Handles both forced-speed and autonegotiation paths.
 * Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex: program BMCR directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* On 5708S, drop 2.5G capability when forcing the speed. */
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly restart autoneg with no speeds
				 * advertised so the partner sees the drop.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path: advertise 2.5G when the board is capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	/* Only restart autoneg if the advertisement changed or autoneg
	 * is currently disabled.
	 */
	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			/* Loopback drops the link; hold it ~11 ms. */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1027
1028 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1029         (ADVERTISED_1000baseT_Full)
1030
1031 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1032         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1033         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1034         ADVERTISED_1000baseT_Full)
1035
1036 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1037         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1038         
1039 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1040
/* Configure the copper PHY for either autonegotiation or forced
 * speed/duplex, based on bp->autoneg and the bp->req_* settings.
 * Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage when comparing the current
		 * advertisement against the desired one.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool ADVERTISED_* bits into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced mode: build the BMCR value for the requested speed and
	 * duplex (10 Mbps half-duplex is the all-zero default).
	 */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR is latched; read twice for the current state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Wait (bounded) for the link to actually drop. */
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1135
1136 static int
1137 bnx2_setup_phy(struct bnx2 *bp)
1138 {
1139         if (bp->loopback == MAC_LOOPBACK)
1140                 return 0;
1141
1142         if (bp->phy_flags & PHY_SERDES_FLAG) {
1143                 return (bnx2_setup_serdes_phy(bp));
1144         }
1145         else {
1146                 return (bnx2_setup_copper_phy(bp));
1147         }
1148 }
1149
/* One-time initialization of the 5708 SerDes PHY: select IEEE-compliant
 * signaling, enable fiber mode / auto-detection, and apply chip- and
 * board-specific TX amplitude tweaks.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use IEEE signaling (DIGITAL3 register block). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Enable fiber mode with automatic detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G capability when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1202
/* One-time initialization of the 5706 SerDes PHY.  Programs the extended
 * packet length setting according to the current MTU.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		/* NOTE(review): magic value from the vendor -- purpose of
		 * this MISC_UNUSED0 write is undocumented here.
		 */
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	/* Registers 0x18/0x1c appear to be Broadcom shadow/expansion
	 * registers; the magic values are vendor-supplied.  NOTE(review):
	 * confirm against Broadcom PHY documentation.
	 */
	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1238
1239 static int
1240 bnx2_init_copper_phy(struct bnx2 *bp)
1241 {
1242         u32 val;
1243
1244         bp->phy_flags |= PHY_CRC_FIX_FLAG;
1245
1246         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1247                 bnx2_write_phy(bp, 0x18, 0x0c00);
1248                 bnx2_write_phy(bp, 0x17, 0x000a);
1249                 bnx2_write_phy(bp, 0x15, 0x310b);
1250                 bnx2_write_phy(bp, 0x17, 0x201f);
1251                 bnx2_write_phy(bp, 0x15, 0x9506);
1252                 bnx2_write_phy(bp, 0x17, 0x401f);
1253                 bnx2_write_phy(bp, 0x15, 0x14e2);
1254                 bnx2_write_phy(bp, 0x18, 0x0400);
1255         }
1256
1257         if (bp->dev->mtu > 1500) {
1258                 /* Set extended packet length bit */
1259                 bnx2_write_phy(bp, 0x18, 0x7);
1260                 bnx2_read_phy(bp, 0x18, &val);
1261                 bnx2_write_phy(bp, 0x18, val | 0x4000);
1262
1263                 bnx2_read_phy(bp, 0x10, &val);
1264                 bnx2_write_phy(bp, 0x10, val | 0x1);
1265         }
1266         else {
1267                 bnx2_write_phy(bp, 0x18, 0x7);
1268                 bnx2_read_phy(bp, 0x18, &val);
1269                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1270
1271                 bnx2_read_phy(bp, 0x10, &val);
1272                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1273         }
1274
1275         /* ethernet@wirespeed */
1276         bnx2_write_phy(bp, 0x18, 0x7007);
1277         bnx2_read_phy(bp, 0x18, &val);
1278         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1279         return 0;
1280 }
1281
1282
/* Reset and initialize the PHY (SerDes or copper), read its ID registers,
 * then program the link settings via bnx2_setup_phy().  Returns the
 * media-specific init routine's status.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	/* Select the link-ready interrupt mode. */
	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two MII ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	/* NOTE(review): setup errors are not propagated; only the init
	 * routine's rc is returned.
	 */
	bnx2_setup_phy(bp);

	return rc;
}
1315
1316 static int
1317 bnx2_set_mac_loopback(struct bnx2 *bp)
1318 {
1319         u32 mac_mode;
1320
1321         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1322         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1323         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1324         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1325         bp->link_up = 1;
1326         return 0;
1327 }
1328
/* Send a message to the bootcode firmware through shared memory and wait
 * for the acknowledgement.  A sequence number is folded into msg_data so
 * the ack can be matched.  Returns 0 on success, -EBUSY on timeout (and
 * marks the firmware as timed out so subsequent calls fail fast).
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data)
{
	int i;
	u32 val;

	/* Once the firmware has timed out, don't keep trying. */
	if (bp->fw_timed_out)
		return -EBUSY;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS * 1000)/5; i++) {
		udelay(5);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware acks by echoing our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}

	/* If we timed out, inform the firmware that this is the case. */
	if (((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) &&
		((msg_data & BNX2_DRV_MSG_DATA) != BNX2_DRV_MSG_DATA_WAIT0)) {

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		bp->fw_timed_out = 1;

		return -EBUSY;
	}

	return 0;
}
1369
1370 static void
1371 bnx2_init_context(struct bnx2 *bp)
1372 {
1373         u32 vcid;
1374
1375         vcid = 96;
1376         while (vcid) {
1377                 u32 vcid_addr, pcid_addr, offset;
1378
1379                 vcid--;
1380
1381                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1382                         u32 new_vcid;
1383
1384                         vcid_addr = GET_PCID_ADDR(vcid);
1385                         if (vcid & 0x8) {
1386                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1387                         }
1388                         else {
1389                                 new_vcid = vcid;
1390                         }
1391                         pcid_addr = GET_PCID_ADDR(new_vcid);
1392                 }
1393                 else {
1394                         vcid_addr = GET_CID_ADDR(vcid);
1395                         pcid_addr = vcid_addr;
1396                 }
1397
1398                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1399                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1400
1401                 /* Zero out the context. */
1402                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1403                         CTX_WR(bp, 0x00, offset, 0);
1404                 }
1405
1406                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1407                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1408         }
1409 }
1410
/* Work around bad RX buffer memory: allocate every free mbuf from the
 * chip, remember the good ones (bit 9 clear), then free only the good
 * ones back -- permanently retiring the bad blocks so the hardware never
 * hands them out again.  Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): assumes the chip never reports more than 512 free
	 * mbufs -- confirm, otherwise this array could overflow.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* NOTE(review): the (val << 9) | val | 1 encoding matches
		 * the RBUF_FW_BUF_FREE register format -- verify in bnx2.h.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1461
1462 static void
1463 bnx2_set_mac_addr(struct bnx2 *bp) 
1464 {
1465         u32 val;
1466         u8 *mac_addr = bp->dev->dev_addr;
1467
1468         val = (mac_addr[0] << 8) | mac_addr[1];
1469
1470         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1471
1472         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
1473                 (mac_addr[4] << 8) | mac_addr[5];
1474
1475         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1476 }
1477
1478 static inline int
1479 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1480 {
1481         struct sk_buff *skb;
1482         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1483         dma_addr_t mapping;
1484         struct rx_bd *rxbd = &bp->rx_desc_ring[index];
1485         unsigned long align;
1486
1487         skb = dev_alloc_skb(bp->rx_buf_size);
1488         if (skb == NULL) {
1489                 return -ENOMEM;
1490         }
1491
1492         if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1493                 skb_reserve(skb, 8 - align);
1494         }
1495
1496         skb->dev = bp->dev;
1497         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1498                 PCI_DMA_FROMDEVICE);
1499
1500         rx_buf->skb = skb;
1501         pci_unmap_addr_set(rx_buf, mapping, mapping);
1502
1503         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1504         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1505
1506         bp->rx_prod_bseq += bp->rx_buf_use_size;
1507
1508         return 0;
1509 }
1510
1511 static void
1512 bnx2_phy_int(struct bnx2 *bp)
1513 {
1514         u32 new_link_state, old_link_state;
1515
1516         new_link_state = bp->status_blk->status_attn_bits &
1517                 STATUS_ATTN_BITS_LINK_STATE;
1518         old_link_state = bp->status_blk->status_attn_bits_ack &
1519                 STATUS_ATTN_BITS_LINK_STATE;
1520         if (new_link_state != old_link_state) {
1521                 if (new_link_state) {
1522                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1523                                 STATUS_ATTN_BITS_LINK_STATE);
1524                 }
1525                 else {
1526                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1527                                 STATUS_ATTN_BITS_LINK_STATE);
1528                 }
1529                 bnx2_set_link(bp);
1530         }
1531 }
1532
/* Service the TX completion ring: unmap and free every skb whose buffer
 * descriptors the hardware has consumed, then wake the TX queue if it was
 * stopped and enough descriptors are free.  Runs in interrupt context
 * (uses dev_kfree_skb_irq).
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	/* The last entry of each ring page is a next-page pointer, not a
	 * real BD; skip over it when the hardware index lands on it.
	 */
	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit subtraction handles index wrap;
			 * stop if the packet's last BD is still pending.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		/* Unmap the linear (headlen) part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment's BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		/* Re-read the hardware index; more completions may have
		 * arrived while we were freeing.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	/* Wake the queue under tx_lock to avoid racing with the xmit
	 * path; re-check the stopped state after taking the lock.
	 */
	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
1615
/* Recycle an RX buffer: move the skb and its DMA mapping from the
 * consumer slot to the producer slot so the hardware can refill the same
 * buffer, without allocating a new skb.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons];
	struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod];
	struct rx_bd *cons_bd = &bp->rx_desc_ring[cons];
	struct rx_bd *prod_bd = &bp->rx_desc_ring[prod];

	/* Hand the buffer back to the device (only the header area was
	 * synced for the CPU by the caller).
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the first 8 bytes of the descriptor.  NOTE(review): this
	 * assumes the DMA address words are the leading fields of
	 * struct rx_bd -- verify against the layout in bnx2.h.
	 */
	memcpy(prod_bd, cons_bd, 8);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

}
1638
/* NAPI receive handler.  Drains up to @budget completed RX buffer
 * descriptors as reported by the status block, hands the packets to the
 * stack, and replenishes the ring.  Returns the number of packets
 * processed.  Runs in softirq context from bnx2_poll().
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last entry of each ring page is a next-page pointer, not a
	 * packet descriptor, so the consumer index must skip over it.
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u16 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		/* Sync only the frame header plus enough data for the
		 * small-packet copy below; the full buffer is unmapped
		 * later only if we keep the skb.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev,
			pci_unmap_addr(rx_buf, mapping),
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip writes an l2_fhdr at the start of the buffer,
		 * ahead of the rx_offset where the packet data begins.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* strip trailing CRC */

		if (rx_hdr->l2_fhdr_errors &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Bad frame: recycle the buffer, count nothing. */
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			/* Original buffer stays mapped and goes back on
			 * the ring; the copy is what we pass up.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer was allocated, so this one
			 * can be unmapped and handed to the stack.
			 */
			pci_unmap_single(bp->pdev,
				pci_unmap_addr(rx_buf, mapping),
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Allocation failed (or frame was bad): put the
			 * same buffer back on the ring and drop the frame.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless they carry a VLAN tag
		 * (0x8100), which legitimately adds 4 bytes.
		 * NOTE(review): ntohs would be the endian-correct spelling
		 * here; htons happens to be the same operation on Linux.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		status = rx_hdr->l2_fhdr_status;
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			u16 cksum = rx_hdr->l2_fhdr_tcp_udp_xsum;

			/* 0xffff means the hardware verified the checksum. */
			if (cksum == 0xffff)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		rx_buf->skb = NULL;

		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1789
1790 /* MSI ISR - The only difference between this and the INTx ISR
1791  * is that the MSI interrupt is always serviced.
1792  */
1793 static irqreturn_t
1794 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1795 {
1796         struct net_device *dev = dev_instance;
1797         struct bnx2 *bp = dev->priv;
1798
1799         prefetch(bp->status_blk);
1800         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1801                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1802                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1803
1804         /* Return here if interrupt is disabled. */
1805         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1806                 return IRQ_HANDLED;
1807
1808         netif_rx_schedule(dev);
1809
1810         return IRQ_HANDLED;
1811 }
1812
1813 static irqreturn_t
1814 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1815 {
1816         struct net_device *dev = dev_instance;
1817         struct bnx2 *bp = dev->priv;
1818
1819         /* When using INTx, it is possible for the interrupt to arrive
1820          * at the CPU before the status block posted prior to the
1821          * interrupt. Reading a register will flush the status block.
1822          * When using MSI, the MSI message will always complete after
1823          * the status block write.
1824          */
1825         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1826             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1827              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1828                 return IRQ_NONE;
1829
1830         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1831                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1832                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1833
1834         /* Return here if interrupt is shared and is disabled. */
1835         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1836                 return IRQ_HANDLED;
1837
1838         netif_rx_schedule(dev);
1839
1840         return IRQ_HANDLED;
1841 }
1842
1843 static inline int
1844 bnx2_has_work(struct bnx2 *bp)
1845 {
1846         struct status_block *sblk = bp->status_blk;
1847
1848         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1849             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1850                 return 1;
1851
1852         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1853             bp->link_up)
1854                 return 1;
1855
1856         return 0;
1857 }
1858
/* NAPI poll routine.  Services link attention, TX completions, and up
 * to the allowed budget of RX packets.  Returns 0 and re-enables
 * interrupts when all work is done, 1 to stay on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = dev->priv;

	/* A link attention is pending when the attn bit differs from
	 * its ack bit; bnx2_phy_int() handles and acks it.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Honor the smaller of the global budget and our quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have seen *before* re-checking for
	 * work; the ack below tells the chip this index was serviced.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* Ack up to last_status_idx and unmask interrupts. */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bp->last_status_idx);
		return 0;
	}

	return 1;
}
1902
/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN tag stripping) and the
 * RPM sort rules (broadcast, multicast, hash filter) from dev->flags
 * and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = dev->priv;
	u32 rx_mode, sort_mode;
	int i;

	/* phy_lock also serializes these register accesses. */
	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep (don't strip) VLAN tags unless a vlan group is active. */
	if (!bp->vlgrp) {
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	}
#else
	rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit of the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The low 8 CRC bits select one of 256 hash bits:
			 * bits 7:5 pick the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort rules: disable, load, then re-enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
1976
1977 static void
1978 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
1979         u32 rv2p_proc)
1980 {
1981         int i;
1982         u32 val;
1983
1984
1985         for (i = 0; i < rv2p_code_len; i += 8) {
1986                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
1987                 rv2p_code++;
1988                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
1989                 rv2p_code++;
1990
1991                 if (rv2p_proc == RV2P_PROC1) {
1992                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
1993                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
1994                 }
1995                 else {
1996                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
1997                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
1998                 }
1999         }
2000
2001         /* Reset the processor, un-stall is done later. */
2002         if (rv2p_proc == RV2P_PROC1) {
2003                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2004         }
2005         else {
2006                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2007         }
2008 }
2009
2010 static void
2011 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2012 {
2013         u32 offset;
2014         u32 val;
2015
2016         /* Halt the CPU. */
2017         val = REG_RD_IND(bp, cpu_reg->mode);
2018         val |= cpu_reg->mode_value_halt;
2019         REG_WR_IND(bp, cpu_reg->mode, val);
2020         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2021
2022         /* Load the Text area. */
2023         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2024         if (fw->text) {
2025                 int j;
2026
2027                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2028                         REG_WR_IND(bp, offset, fw->text[j]);
2029                 }
2030         }
2031
2032         /* Load the Data area. */
2033         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2034         if (fw->data) {
2035                 int j;
2036
2037                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2038                         REG_WR_IND(bp, offset, fw->data[j]);
2039                 }
2040         }
2041
2042         /* Load the SBSS area. */
2043         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2044         if (fw->sbss) {
2045                 int j;
2046
2047                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2048                         REG_WR_IND(bp, offset, fw->sbss[j]);
2049                 }
2050         }
2051
2052         /* Load the BSS area. */
2053         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2054         if (fw->bss) {
2055                 int j;
2056
2057                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2058                         REG_WR_IND(bp, offset, fw->bss[j]);
2059                 }
2060         }
2061
2062         /* Load the Read-Only area. */
2063         offset = cpu_reg->spad_base +
2064                 (fw->rodata_addr - cpu_reg->mips_view_base);
2065         if (fw->rodata) {
2066                 int j;
2067
2068                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2069                         REG_WR_IND(bp, offset, fw->rodata[j]);
2070                 }
2071         }
2072
2073         /* Clear the pre-fetch instruction. */
2074         REG_WR_IND(bp, cpu_reg->inst, 0);
2075         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2076
2077         /* Start the CPU. */
2078         val = REG_RD_IND(bp, cpu_reg->mode);
2079         val &= ~cpu_reg->mode_value_halt;
2080         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2081         REG_WR_IND(bp, cpu_reg->mode, val);
2082 }
2083
/* Download firmware to every on-chip processor: the two RV2P engines
 * and the RX, TX, TX patch-up (TPAT), and completion (COM) RISC CPUs.
 * Each RISC CPU uses the same register layout, so a cpu_reg/fw_info
 * pair is filled in and handed to load_cpu_fw() per processor.
 */
static void
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
	load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	/* All CPUs see their scratchpad at this MIPS virtual base. */
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_RXP_b06FwText;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TXP_b06FwText;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TPAT_b06FwText;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_COM_b06FwText;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

}
2279
/* Move the device between PCI power states.  Only D0 (full power) and
 * D3hot (suspend, optionally armed for Wake-on-LAN) are supported;
 * anything else returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (back to D0) and clear any
		 * pending PME status (write-1-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear latched magic/ACPI packet indications and
		 * disable magic-packet detection while running.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the link down to
			 * 10/100 for low-power WOL operation, then
			 * restore the user's autoneg settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_FORCE_LINK |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rules: disable, load, re-enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether we are suspending with WOL. */
		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg);

		/* 3 is the D3hot encoding of the PMCSR state field.
		 * NOTE(review): 5706 A0/A1 are only put in D3hot when WOL
		 * is armed — presumably an early-silicon workaround;
		 * confirm against the chip errata.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2406
2407 static int
2408 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2409 {
2410         u32 val;
2411         int j;
2412
2413         /* Request access to the flash interface. */
2414         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2415         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2416                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2417                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2418                         break;
2419
2420                 udelay(5);
2421         }
2422
2423         if (j >= NVRAM_TIMEOUT_COUNT)
2424                 return -EBUSY;
2425
2426         return 0;
2427 }
2428
2429 static int
2430 bnx2_release_nvram_lock(struct bnx2 *bp)
2431 {
2432         int j;
2433         u32 val;
2434
2435         /* Relinquish nvram interface. */
2436         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2437
2438         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2439                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2440                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2441                         break;
2442
2443                 udelay(5);
2444         }
2445
2446         if (j >= NVRAM_TIMEOUT_COUNT)
2447                 return -EBUSY;
2448
2449         return 0;
2450 }
2451
2452
2453 static int
2454 bnx2_enable_nvram_write(struct bnx2 *bp)
2455 {
2456         u32 val;
2457
2458         val = REG_RD(bp, BNX2_MISC_CFG);
2459         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2460
2461         if (!bp->flash_info->buffered) {
2462                 int j;
2463
2464                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2465                 REG_WR(bp, BNX2_NVM_COMMAND,
2466                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2467
2468                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2469                         udelay(5);
2470
2471                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2472                         if (val & BNX2_NVM_COMMAND_DONE)
2473                                 break;
2474                 }
2475
2476                 if (j >= NVRAM_TIMEOUT_COUNT)
2477                         return -EBUSY;
2478         }
2479         return 0;
2480 }
2481
2482 static void
2483 bnx2_disable_nvram_write(struct bnx2 *bp)
2484 {
2485         u32 val;
2486
2487         val = REG_RD(bp, BNX2_MISC_CFG);
2488         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2489 }
2490
2491
2492 static void
2493 bnx2_enable_nvram_access(struct bnx2 *bp)
2494 {
2495         u32 val;
2496
2497         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2498         /* Enable both bits, even on read. */
2499         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2500                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2501 }
2502
2503 static void
2504 bnx2_disable_nvram_access(struct bnx2 *bp)
2505 {
2506         u32 val;
2507
2508         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2509         /* Disable both bits, even after read. */
2510         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2511                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2512                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2513 }
2514
2515 static int
2516 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2517 {
2518         u32 cmd;
2519         int j;
2520
2521         if (bp->flash_info->buffered)
2522                 /* Buffered flash, no erase needed */
2523                 return 0;
2524
2525         /* Build an erase command */
2526         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2527               BNX2_NVM_COMMAND_DOIT;
2528
2529         /* Need to clear DONE bit separately. */
2530         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2531
2532         /* Address of the NVRAM to read from. */
2533         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2534
2535         /* Issue an erase command. */
2536         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2537
2538         /* Wait for completion. */
2539         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2540                 u32 val;
2541
2542                 udelay(5);
2543
2544                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2545                 if (val & BNX2_NVM_COMMAND_DONE)
2546                         break;
2547         }
2548
2549         if (j >= NVRAM_TIMEOUT_COUNT)
2550                 return -EBUSY;
2551
2552         return 0;
2553 }
2554
2555 static int
2556 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2557 {
2558         u32 cmd;
2559         int j;
2560
2561         /* Build the command word. */
2562         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2563
2564         /* Calculate an offset of a buffered flash. */
2565         if (bp->flash_info->buffered) {
2566                 offset = ((offset / bp->flash_info->page_size) <<
2567                            bp->flash_info->page_bits) +
2568                           (offset % bp->flash_info->page_size);
2569         }
2570
2571         /* Need to clear DONE bit separately. */
2572         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2573
2574         /* Address of the NVRAM to read from. */
2575         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2576
2577         /* Issue a read command. */
2578         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2579
2580         /* Wait for completion. */
2581         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2582                 u32 val;
2583
2584                 udelay(5);
2585
2586                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2587                 if (val & BNX2_NVM_COMMAND_DONE) {
2588                         val = REG_RD(bp, BNX2_NVM_READ);
2589
2590                         val = be32_to_cpu(val);
2591                         memcpy(ret_val, &val, 4);
2592                         break;
2593                 }
2594         }
2595         if (j >= NVRAM_TIMEOUT_COUNT)
2596                 return -EBUSY;
2597
2598         return 0;
2599 }
2600
2601
2602 static int
2603 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2604 {
2605         u32 cmd, val32;
2606         int j;
2607
2608         /* Build the command word. */
2609         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2610
2611         /* Calculate an offset of a buffered flash. */
2612         if (bp->flash_info->buffered) {
2613                 offset = ((offset / bp->flash_info->page_size) <<
2614                           bp->flash_info->page_bits) +
2615                          (offset % bp->flash_info->page_size);
2616         }
2617
2618         /* Need to clear DONE bit separately. */
2619         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2620
2621         memcpy(&val32, val, 4);
2622         val32 = cpu_to_be32(val32);
2623
2624         /* Write the data. */
2625         REG_WR(bp, BNX2_NVM_WRITE, val32);
2626
2627         /* Address of the NVRAM to write to. */
2628         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2629
2630         /* Issue the write command. */
2631         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2632
2633         /* Wait for completion. */
2634         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2635                 udelay(5);
2636
2637                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2638                         break;
2639         }
2640         if (j >= NVRAM_TIMEOUT_COUNT)
2641                 return -EBUSY;
2642
2643         return 0;
2644 }
2645
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info.
 *
 * NVM_CFG1 bit 30 tells whether the flash interface has already been
 * reconfigured (presumably by the bootcode -- TODO confirm); if so we
 * only match the table entry, otherwise the matching entry's CFG1-CFG3
 * and WRITE1 values are programmed into the interface under the NVRAM
 * hardware lock.
 *
 * Returns 0 on success, -ENODEV if no flash_table entry matches the
 * strapping, or the error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strap bits of CFG1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop broke out: no table entry matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		rc = -ENODEV;
	}

	return rc;
}
2716
2717 static int
2718 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2719                 int buf_size)
2720 {
2721         int rc = 0;
2722         u32 cmd_flags, offset32, len32, extra;
2723
2724         if (buf_size == 0)
2725                 return 0;
2726
2727         /* Request access to the flash interface. */
2728         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2729                 return rc;
2730
2731         /* Enable access to flash interface */
2732         bnx2_enable_nvram_access(bp);
2733
2734         len32 = buf_size;
2735         offset32 = offset;
2736         extra = 0;
2737
2738         cmd_flags = 0;
2739
2740         if (offset32 & 3) {
2741                 u8 buf[4];
2742                 u32 pre_len;
2743
2744                 offset32 &= ~3;
2745                 pre_len = 4 - (offset & 3);
2746
2747                 if (pre_len >= len32) {
2748                         pre_len = len32;
2749                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2750                                     BNX2_NVM_COMMAND_LAST;
2751                 }
2752                 else {
2753                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2754                 }
2755
2756                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2757
2758                 if (rc)
2759                         return rc;
2760
2761                 memcpy(ret_buf, buf + (offset & 3), pre_len);
2762
2763                 offset32 += 4;
2764                 ret_buf += pre_len;
2765                 len32 -= pre_len;
2766         }
2767         if (len32 & 3) {
2768                 extra = 4 - (len32 & 3);
2769                 len32 = (len32 + 4) & ~3;
2770         }
2771
2772         if (len32 == 4) {
2773                 u8 buf[4];
2774
2775                 if (cmd_flags)
2776                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2777                 else
2778                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2779                                     BNX2_NVM_COMMAND_LAST;
2780
2781                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2782
2783                 memcpy(ret_buf, buf, 4 - extra);
2784         }
2785         else if (len32 > 0) {
2786                 u8 buf[4];
2787
2788                 /* Read the first word. */
2789                 if (cmd_flags)
2790                         cmd_flags = 0;
2791                 else
2792                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2793
2794                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2795
2796                 /* Advance to the next dword. */
2797                 offset32 += 4;
2798                 ret_buf += 4;
2799                 len32 -= 4;
2800
2801                 while (len32 > 4 && rc == 0) {
2802                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2803
2804                         /* Advance to the next dword. */
2805                         offset32 += 4;
2806                         ret_buf += 4;
2807                         len32 -= 4;
2808                 }
2809
2810                 if (rc)
2811                         return rc;
2812
2813                 cmd_flags = BNX2_NVM_COMMAND_LAST;
2814                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2815
2816                 memcpy(ret_buf, buf, 4 - extra);
2817         }
2818
2819         /* Disable access to flash interface */
2820         bnx2_disable_nvram_access(bp);
2821
2822         bnx2_release_nvram_lock(bp);
2823
2824         return rc;
2825 }
2826
2827 static int
2828 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2829                 int buf_size)
2830 {
2831         u32 written, offset32, len32;
2832         u8 *buf, start[4], end[4];
2833         int rc = 0;
2834         int align_start, align_end;
2835
2836         buf = data_buf;
2837         offset32 = offset;
2838         len32 = buf_size;
2839         align_start = align_end = 0;
2840
2841         if ((align_start = (offset32 & 3))) {
2842                 offset32 &= ~3;
2843                 len32 += align_start;
2844                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2845                         return rc;
2846         }
2847
2848         if (len32 & 3) {
2849                 if ((len32 > 4) || !align_start) {
2850                         align_end = 4 - (len32 & 3);
2851                         len32 += align_end;
2852                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2853                                 end, 4))) {
2854                                 return rc;
2855                         }
2856                 }
2857         }
2858
2859         if (align_start || align_end) {
2860                 buf = kmalloc(len32, GFP_KERNEL);
2861                 if (buf == 0)
2862                         return -ENOMEM;
2863                 if (align_start) {
2864                         memcpy(buf, start, 4);
2865                 }
2866                 if (align_end) {
2867                         memcpy(buf + len32 - 4, end, 4);
2868                 }
2869                 memcpy(buf + align_start, data_buf, buf_size);
2870         }
2871
2872         written = 0;
2873         while ((written < len32) && (rc == 0)) {
2874                 u32 page_start, page_end, data_start, data_end;
2875                 u32 addr, cmd_flags;
2876                 int i;
2877                 u8 flash_buffer[264];
2878
2879                 /* Find the page_start addr */
2880                 page_start = offset32 + written;
2881                 page_start -= (page_start % bp->flash_info->page_size);
2882                 /* Find the page_end addr */
2883                 page_end = page_start + bp->flash_info->page_size;
2884                 /* Find the data_start addr */
2885                 data_start = (written == 0) ? offset32 : page_start;
2886                 /* Find the data_end addr */
2887                 data_end = (page_end > offset32 + len32) ? 
2888                         (offset32 + len32) : page_end;
2889
2890                 /* Request access to the flash interface. */
2891                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2892                         goto nvram_write_end;
2893
2894                 /* Enable access to flash interface */
2895                 bnx2_enable_nvram_access(bp);
2896
2897                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2898                 if (bp->flash_info->buffered == 0) {
2899                         int j;
2900
2901                         /* Read the whole page into the buffer
2902                          * (non-buffer flash only) */
2903                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
2904                                 if (j == (bp->flash_info->page_size - 4)) {
2905                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
2906                                 }
2907                                 rc = bnx2_nvram_read_dword(bp,
2908                                         page_start + j, 
2909                                         &flash_buffer[j], 
2910                                         cmd_flags);
2911
2912                                 if (rc)
2913                                         goto nvram_write_end;
2914
2915                                 cmd_flags = 0;
2916                         }
2917                 }
2918
2919                 /* Enable writes to flash interface (unlock write-protect) */
2920                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2921                         goto nvram_write_end;
2922
2923                 /* Erase the page */
2924                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
2925                         goto nvram_write_end;
2926
2927                 /* Re-enable the write again for the actual write */
2928                 bnx2_enable_nvram_write(bp);
2929
2930                 /* Loop to write back the buffer data from page_start to
2931                  * data_start */
2932                 i = 0;
2933                 if (bp->flash_info->buffered == 0) {
2934                         for (addr = page_start; addr < data_start;
2935                                 addr += 4, i += 4) {
2936                                 
2937                                 rc = bnx2_nvram_write_dword(bp, addr,
2938                                         &flash_buffer[i], cmd_flags);
2939
2940                                 if (rc != 0)
2941                                         goto nvram_write_end;
2942
2943                                 cmd_flags = 0;
2944                         }
2945                 }
2946
2947                 /* Loop to write the new data from data_start to data_end */
2948                 for (addr = data_start; addr < data_end; addr += 4, i++) {
2949                         if ((addr == page_end - 4) ||
2950                                 ((bp->flash_info->buffered) &&
2951                                  (addr == data_end - 4))) {
2952
2953                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2954                         }
2955                         rc = bnx2_nvram_write_dword(bp, addr, buf,
2956                                 cmd_flags);
2957
2958                         if (rc != 0)
2959                                 goto nvram_write_end;
2960
2961                         cmd_flags = 0;
2962                         buf += 4;
2963                 }
2964
2965                 /* Loop to write back the buffer data from data_end
2966                  * to page_end */
2967                 if (bp->flash_info->buffered == 0) {
2968                         for (addr = data_end; addr < page_end;
2969                                 addr += 4, i += 4) {
2970                         
2971                                 if (addr == page_end-4) {
2972                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2973                                 }
2974                                 rc = bnx2_nvram_write_dword(bp, addr,
2975                                         &flash_buffer[i], cmd_flags);
2976
2977                                 if (rc != 0)
2978                                         goto nvram_write_end;
2979
2980                                 cmd_flags = 0;
2981                         }
2982                 }
2983
2984                 /* Disable writes to flash interface (lock write-protect) */
2985                 bnx2_disable_nvram_write(bp);
2986
2987                 /* Disable access to flash interface */
2988                 bnx2_disable_nvram_access(bp);
2989                 bnx2_release_nvram_lock(bp);
2990
2991                 /* Increment written */
2992                 written += data_end - data_start;
2993         }
2994
2995 nvram_write_end:
2996         if (align_start || align_end)
2997                 kfree(buf);
2998         return rc;
2999 }
3000
/* Soft-reset the chip after handshaking with the bootcode firmware.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value describing the reason for the
 *	reset; it is sent to the firmware (with the WAIT0/WAIT1 flags)
 *	both before and after the reset via bnx2_fw_sync().
 *
 * Returns 0 on success, -EBUSY if the reset never completed, -ENODEV
 * if the chip came back in the wrong endian mode, or the result of
 * bnx2_alloc_bad_rbuf() on 5706 A0.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);	/* flush the write */
	udelay(5);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	bp->fw_timed_out = 0;

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* 5706 A0/A1 get extra settling time after the reset request. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Fail if the reset request/busy bits never cleared. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	bp->fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3081
3082 static int
3083 bnx2_init_chip(struct bnx2 *bp)
3084 {
3085         u32 val;
3086
3087         /* Make sure the interrupt is not active. */
3088         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3089
3090         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3091               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3092 #ifdef __BIG_ENDIAN
3093               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
3094 #endif
3095               BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
3096               DMA_READ_CHANS << 12 |
3097               DMA_WRITE_CHANS << 16;
3098
3099         val |= (0x2 << 20) | (1 << 11);
3100
3101         if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz = 133))
3102                 val |= (1 << 23);
3103
3104         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3105             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3106                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3107
3108         REG_WR(bp, BNX2_DMA_CONFIG, val);
3109
3110         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3111                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3112                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3113                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3114         }
3115
3116         if (bp->flags & PCIX_FLAG) {
3117                 u16 val16;
3118
3119                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3120                                      &val16);
3121                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3122                                       val16 & ~PCI_X_CMD_ERO);
3123         }
3124
3125         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3126                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3127                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3128                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3129
3130         /* Initialize context mapping and zero out the quick contexts.  The
3131          * context block must have already been enabled. */
3132         bnx2_init_context(bp);
3133
3134         bnx2_init_cpus(bp);
3135         bnx2_init_nvram(bp);
3136
3137         bnx2_set_mac_addr(bp);
3138
3139         val = REG_RD(bp, BNX2_MQ_CONFIG);
3140         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3141         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3142         REG_WR(bp, BNX2_MQ_CONFIG, val);
3143
3144         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3145         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3146         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3147
3148         val = (BCM_PAGE_BITS - 8) << 24;
3149         REG_WR(bp, BNX2_RV2P_CONFIG, val);
3150
3151         /* Configure page size. */
3152         val = REG_RD(bp, BNX2_TBDR_CONFIG);
3153         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3154         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3155         REG_WR(bp, BNX2_TBDR_CONFIG, val);
3156
3157         val = bp->mac_addr[0] +
3158               (bp->mac_addr[1] << 8) +
3159               (bp->mac_addr[2] << 16) +
3160               bp->mac_addr[3] +
3161               (bp->mac_addr[4] << 8) +
3162               (bp->mac_addr[5] << 16);
3163         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3164
3165         /* Program the MTU.  Also include 4 bytes for CRC32. */
3166         val = bp->dev->mtu + ETH_HLEN + 4;
3167         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3168                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3169         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3170
3171         bp->last_status_idx = 0;
3172         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3173
3174         /* Set up how to generate a link change interrupt. */
3175         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3176
3177         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3178                (u64) bp->status_blk_mapping & 0xffffffff);
3179         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3180
3181         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3182                (u64) bp->stats_blk_mapping & 0xffffffff);
3183         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3184                (u64) bp->stats_blk_mapping >> 32);
3185
3186         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
3187                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3188
3189         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3190                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3191
3192         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3193                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3194
3195         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3196
3197         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3198
3199         REG_WR(bp, BNX2_HC_COM_TICKS,
3200                (bp->com_ticks_int << 16) | bp->com_ticks);
3201
3202         REG_WR(bp, BNX2_HC_CMD_TICKS,
3203                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3204
3205         REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3206         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3207
3208         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3209                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3210         else {
3211                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3212                        BNX2_HC_CONFIG_TX_TMR_MODE |
3213                        BNX2_HC_CONFIG_COLLECT_STATS);
3214         }
3215
3216         /* Clear internal stats counters. */
3217         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3218
3219         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3220
3221         /* Initialize the receive filter. */
3222         bnx2_set_rx_mode(bp->dev);
3223
3224         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET);
3225
3226         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3227         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3228
3229         udelay(20);
3230
3231         return 0;
3232 }
3233
3234
/* Set up the TX BD ring: chain the last BD back to the start of the
 * ring, reset the software producer/consumer indices, and program the
 * TX context in the chip with the ring type and its DMA address.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* The final BD is a chain BD whose address points back at the
	 * base of the ring. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset the driver's view of the ring. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Program the TX context type/size. */
	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* Tell the chip where the TX BD ring lives. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3265
/* Set up the RX BD ring: compute buffer sizes from the current MTU,
 * initialize the BDs, program the RX context with the ring's DMA
 * address, then pre-fill the ring with receive skbs and publish the
 * producer index/byte sequence to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	/* Reset the driver's view of the ring. */
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	rxbd = &bp->rx_desc_ring[0];
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		rxbd->rx_bd_len = bp->rx_buf_use_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
	}

	/* The final BD chains back to the base of the ring. */
	rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff;

	/* Program the RX context type/size. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Tell the chip where the RX BD ring lives. */
	val = (u64) bp->rx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with skbs; stop early if allocation fails. */
	for ( ;ring_prod < bp->rx_ring_size; ) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3317
3318 static void
3319 bnx2_free_tx_skbs(struct bnx2 *bp)
3320 {
3321         int i;
3322
3323         if (bp->tx_buf_ring == NULL)
3324                 return;
3325
3326         for (i = 0; i < TX_DESC_CNT; ) {
3327                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3328                 struct sk_buff *skb = tx_buf->skb;
3329                 int j, last;
3330
3331                 if (skb == NULL) {
3332                         i++;
3333                         continue;
3334                 }
3335
3336                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3337                         skb_headlen(skb), PCI_DMA_TODEVICE);
3338
3339                 tx_buf->skb = NULL;
3340
3341                 last = skb_shinfo(skb)->nr_frags;
3342                 for (j = 0; j < last; j++) {
3343                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3344                         pci_unmap_page(bp->pdev,
3345                                 pci_unmap_addr(tx_buf, mapping),
3346                                 skb_shinfo(skb)->frags[j].size,
3347                                 PCI_DMA_TODEVICE);
3348                 }
3349