/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.5"
#define DRV_MODULE_RELDATE	"September 20, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, },
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
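
/* Worked example: with 256 indices (TX_DESC_CNT) and one of them always
 * skipped, at most 255 (MAX_TX_DESC_CNT) descriptors are ever in flight.
 * A completely full ring therefore shows diff == 256, which the clamp
 * above reduces to 255 so that tx_ring_size - diff stays correct.
 */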
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
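
/* Note on bnx2_ctx_wr(): the 5709 keeps its context memory in host pages
 * (see the ctx_blk allocations in bnx2_alloc_mem()), so a write is posted
 * through BNX2_CTX_CTX_DATA/CTRL and the WRITE_REQ bit is polled until
 * the chip has consumed it; older chips take a direct write through
 * BNX2_CTX_DATA_ADR/BNX2_CTX_DATA.
 */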
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	return ret;
}
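
/* The MDIO_COMM word built above encodes a clause-22 management frame:
 * the PHY address goes in bits 25:21 (the << 21 shift), the register
 * number in bits 20:16 (the << 16 shift), and for writes the 16-bit data
 * sits in the low half.  START_BUSY both kicks off the cycle and serves
 * as the completion flag polled in the loops of both accessors.
 */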
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
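
/* Note on bnx2_enable_int(): the first write acknowledges the current
 * status index while MASK_INT keeps interrupts off, and the second write
 * unmasks them.  The final COAL_NOW command forces the host coalescing
 * block to generate an interrupt immediately if any events arrived while
 * interrupts were disabled, so nothing is lost across the window.
 */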
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			napi_enable(&bp->napi);
			bnx2_enable_int(bp);
		}
	}
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
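
	/* Layout of the combined allocation: the status block occupies the
	 * first L1_CACHE_ALIGN(sizeof(struct status_block)) bytes and the
	 * statistics block follows immediately, so one pci_alloc_consistent()
	 * call backs both bp->status_blk and bp->stats_blk.
	 */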
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
							      BCM_PAGE_SIZE,
							      &bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	} else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
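	/* Summary of the resolution implemented below (PAUSE = symmetric
	 * pause bit, ASYM = asymmetric pause bit):
	 *
	 *   local PAUSE         + remote PAUSE        -> TX and RX pause
	 *   local PAUSE + ASYM  + remote ASYM only    -> RX pause only
	 *   local ASYM only     + remote PAUSE + ASYM -> TX pause only
	 *   any other combination                     -> no pause
	 */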
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static void
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
}
static void
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
}
static void
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}
}
static void
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
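		/* The link-partner 1000BASE-T ability bits in MII_STAT1000
		 * sit two bit positions above the corresponding advertisement
		 * bits in MII_CTRL1000, so shifting right by 2 lines them up
		 * before masking.
		 */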
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
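
	/* The MII BMSR latches link-down events: the first read returns the
	 * latched value and clears it, the second read reflects the current
	 * link state, which is why the register is read back-to-back here.
	 */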
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int bnx2_fw_sync(struct bnx2 *, u32, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static void
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
static void
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
}
static int bnx2_test_link(struct bnx2 *);
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
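
/* Note on the driver/firmware mailbox handshake used by bnx2_fw_sync():
 * each request carries a sequence number in the low bits of BNX2_DRV_MB,
 * and the firmware echoes that sequence into BNX2_FW_MB once it has
 * consumed the command, so matching BNX2_FW_MSG_ACK against
 * BNX2_DRV_MSG_SEQ detects completion.
 */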
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
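
		/* The free command takes the mbuf handle encoded twice,
		 * once shifted left by 9 bits and once in the low bits,
		 * with bit 0 set (apparently a valid/free flag).
		 */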
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}

	kfree(good_mbuf);
	return 0;
}
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
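
/* rx_prod_bseq is a running byte-sequence counter of all buffer space
 * posted to the rx ring: it advances by rx_buf_use_size for every buffer
 * queued here or recycled in bnx2_reuse_rx_skb(), and is published to
 * the chip through the BNX2_L2CTX_HOST_BSEQ mailbox in bnx2_rx_int().
 */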
static int
bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
{
	struct status_block *sblk = bp->status_blk;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}
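
/* status_attn_bits carries the current hardware attention state and
 * status_attn_bits_ack what the driver has already acknowledged; a
 * mismatch signals a new event, and writing the event bit to the
 * SET/CLEAR command register brings the ack bits back in line.
 */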
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
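
/* The double check under netif_tx_lock above pairs with the queue-stop
 * logic in bnx2_start_xmit(): without re-testing the queue state after
 * taking the lock, a concurrent transmit could stop the queue between
 * the first check and the wake, leaving it stopped despite free
 * descriptors being available.
 */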
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;
		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;
2526 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2528 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
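
/* Worked example (illustrative): the "hw_cons++" adjustment above skips
 * the last entry of each ring page, which holds a link BD pointing to
 * the next page rather than a packet buffer.  Assuming 4K BD pages and
 * 16-byte descriptors, each page holds 256 BDs, so MAX_RX_DESC_CNT is
 * 255:
 *
 *	hw_cons = 0x02ff;
 *	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
 *		hw_cons++;	// 0x0300, first real BD of the next page
 *
 * The same adjustment is applied to the TX consumer index in
 * bnx2_tx_int().
 */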
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
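
/* Example of the attention-bit test above: an event is pending whenever
 * a masked bit differs between status_attn_bits and
 * status_attn_bits_ack.  With hypothetical values
 *
 *	status_attn_bits     = STATUS_ATTN_BITS_LINK_STATE;  // link changed
 *	status_attn_bits_ack = 0;
 *
 * the two masked fields differ and bnx2_has_work() returns 1.  The
 * link handling code later brings the ack bit back in sync with the
 * event bit, so the fields compare equal again once the event has been
 * serviced.
 */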
static int
bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
	int work_done = 0;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
		work_done = bnx2_rx_int(bp, budget);

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev, napi);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* For INTx, mask the interrupt while the index is
		 * updated, then enable with the final index to avoid a
		 * spurious retrigger.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
	}

	return work_done;
}
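
/* Sketch of the NAPI contract implemented above (illustrative only;
 * some_poll, process_rx, more_work and enable_device_interrupts are
 * hypothetical names):
 *
 *	static int some_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = process_rx(budget); // never exceed budget
 *
 *		if (!more_work()) {
 *			netif_rx_complete(dev, napi); // leave poll mode
 *			enable_device_interrupts();
 *		}
 *		return work_done;
 *	}
 *
 * bnx2 re-enables interrupts by writing the last seen status index to
 * BNX2_PCICFG_INT_ACK_CMD, so the chip only interrupts again when the
 * status block has moved past what the poll loop already consumed.
 */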
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
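
/* Worked example of the multicast hash above.  The low byte of the
 * little-endian CRC32 of the MAC address selects 1 of 256 filter bits,
 * spread across 8 32-bit registers.  Assuming a hypothetical address
 * whose CRC has low byte 0x53:
 *
 *	bit    = 0x53;			// crc & 0xff
 *	regidx = (0x53 & 0xe0) >> 5;	// 2: third hash register
 *	bit   &= 0x1f;			// 0x13: bit 19 in that register
 *	mc_filter[2] |= 1 << 19;
 *
 * The chip computes the same hash on each received multicast frame and
 * accepts the frame when the corresponding filter bit is set.
 */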
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "uncompression.\n", bp->dev->name);
	return -ENOMEM;
}

static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Skip the 10-byte fixed header, plus the original file name
	 * if the FNAME flag is present.
	 */
	n = 10;

#define FNAME	0x8
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len))
			;

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
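
/* Header handling in bnx2_gunzip(), per RFC 1952: a gzip stream begins
 * with a 10-byte fixed header -- magic bytes 0x1f 0x8b, a method byte
 * (8 == deflate), a flag byte, then mtime/XFL/OS.  If the FNAME flag
 * (0x8) is set, a NUL-terminated file name follows, hence the scan
 * loop above.  Hypothetical header for a firmware image named "fw":
 *
 *	1f 8b 08 08 <mtime:4> 00 03 'f' 'w' 00 <raw deflate data...>
 *
 * zlib_inflateInit2() is called with -MAX_WBITS so zlib consumes the
 * raw deflate data directly instead of expecting a zlib wrapper.
 */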
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
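
/* Each RV2P instruction is 64 bits wide, so the loop above consumes two
 * 32-bit words per instruction and steps the byte count i by 8; i / 8
 * is therefore the instruction address written to the ADDR_CMD
 * register.  Example: at i == 16 the third instruction (address 2) is
 * loaded from words 4 and 5 of the array (0-based).
 */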
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc, j;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area, decompressing it first. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		u32 text_len;
		void *text;

		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
				 &text_len);
		if (rc)
			return rc;

		fw->text = text;
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data)
		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
			REG_WR_IND(bp, offset, fw->data[j]);

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss)
		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
			REG_WR_IND(bp, offset, fw->sbss[j]);

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss)
		for (j = 0; j < (fw->bss_len / 4); j++, offset += 4)
			REG_WR_IND(bp, offset, fw->bss[j]);

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata)
		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
			REG_WR_IND(bp, offset, fw->rodata[j]);

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			/* The PCI clock is not stopped in D3hot on the
			 * 5706 A0 and A1, so leave the power state bits
			 * at D0 on these revisions.
			 */
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
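
/* Reference for the PMCSR bit arithmetic above (standard PCI PM layout,
 * from <linux/pci_regs.h>):
 *
 *	PCI_PM_CTRL_STATE_MASK	0x0003	// D0 = 0 ... D3hot = 3
 *	PCI_PM_CTRL_PME_ENABLE	0x0100
 *	PCI_PM_CTRL_PME_STATUS	0x8000	// write 1 to clear
 *
 * So entering D0 clears the state bits while writing PME_STATUS back to
 * clear a pending wake event, and entering D3hot with WOL sets the
 * state bits to 3 and turns on PME_ENABLE.
 */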
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;
		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;
		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
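
/* Typical calling sequence for the NVRAM helpers (condensed sketch of
 * the pattern used by the read and write paths later in this file):
 *
 *	rc = bnx2_acquire_nvram_lock(bp);	// arbitrate with firmware
 *	if (rc == 0) {
 *		bnx2_enable_nvram_access(bp);
 *		// ... read or program flash words ...
 *		bnx2_disable_nvram_access(bp);
 *		bnx2_release_nvram_lock(bp);
 *	}
 *
 * Every poll loop bounds its wait with NVRAM_TIMEOUT_COUNT so a wedged
 * flash interface fails with -EBUSY instead of hanging the driver.
 */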
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}