1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x8000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.6.5"
60 #define DRV_MODULE_RELDATE "September 20, 2007"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
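/* Return the number of tx descriptors still available.  tx_prod and
 * tx_cons are free-running 16-bit indices, so their raw difference is
 * normalized before being subtracted from the configured ring size.
 */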
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
 */
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
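/* Indirect register reads and writes go through the PCICFG register
 * window; indirect_lock serializes the address/data register pair.
 */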
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
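/* Write one word of context memory.  The 5709 uses a write-request
 * handshake through BNX2_CTX_CTX_CTRL that must be polled for
 * completion; older chips take a plain address/data register pair.
 */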
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
290 spin_unlock_bh(&bp->indirect_lock);
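/* Read a PHY register through the EMAC MDIO interface.  Autopolling
 * is paused so the interface is free for the command, then the
 * START_BUSY bit is polled until the transaction finishes or times out.
 */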
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
314 for (i = 0; i < 50; i++) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
371 for (i = 0; i < 50; i++) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 bnx2_disable_int(struct bnx2 *bp)
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
408 bnx2_enable_int(struct bnx2 *bp)
410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
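/* Mask interrupts and wait for any handler running on another CPU to
 * finish.  intr_sem is bumped first so a racing ISR returns without
 * touching the hardware.
 */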
421 bnx2_disable_int_sync(struct bnx2 *bp)
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
429 bnx2_netif_stop(struct bnx2 *bp)
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
433 napi_disable(&bp->napi);
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
440 bnx2_netif_start(struct bnx2 *bp)
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
445 napi_enable(&bp->napi);
452 bnx2_free_mem(struct bnx2 *bp)
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
464 if (bp->status_blk) {
465 pci_free_consistent(bp->pdev, bp->status_stats_size,
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
468 bp->stats_blk = NULL;
470 if (bp->tx_desc_ring) {
471 pci_free_consistent(bp->pdev,
472 sizeof(struct tx_bd) * TX_DESC_CNT,
473 bp->tx_desc_ring, bp->tx_desc_mapping);
474 bp->tx_desc_ring = NULL;
476 kfree(bp->tx_buf_ring);
477 bp->tx_buf_ring = NULL;
478 for (i = 0; i < bp->rx_max_ring; i++) {
479 if (bp->rx_desc_ring[i])
480 pci_free_consistent(bp->pdev,
481 sizeof(struct rx_bd) * RX_DESC_CNT,
483 bp->rx_desc_mapping[i]);
484 bp->rx_desc_ring[i] = NULL;
486 vfree(bp->rx_buf_ring);
487 bp->rx_buf_ring = NULL;
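/* Allocate the tx/rx rings and the combined status + statistics
 * block.  bnx2_free_mem() tolerates NULL pointers, so it can unwind a
 * partially completed allocation on failure.
 */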
491 bnx2_alloc_mem(struct bnx2 *bp)
493 int i, status_blk_size;
495 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
497 if (bp->tx_buf_ring == NULL)
500 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
501 sizeof(struct tx_bd) *
503 &bp->tx_desc_mapping);
504 if (bp->tx_desc_ring == NULL)
507 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
509 if (bp->rx_buf_ring == NULL)
512 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
515 for (i = 0; i < bp->rx_max_ring; i++) {
516 bp->rx_desc_ring[i] =
517 pci_alloc_consistent(bp->pdev,
518 sizeof(struct rx_bd) * RX_DESC_CNT,
519 &bp->rx_desc_mapping[i]);
520 if (bp->rx_desc_ring[i] == NULL)
525 /* Combine status and statistics blocks into one allocation. */
526 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
527 bp->status_stats_size = status_blk_size +
528 sizeof(struct statistics_block);
530 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
531 &bp->status_blk_mapping);
532 if (bp->status_blk == NULL)
535 memset(bp->status_blk, 0, bp->status_stats_size);
537 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
540 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
542 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
543 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
544 if (bp->ctx_pages == 0)
546 for (i = 0; i < bp->ctx_pages; i++) {
547 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
549 &bp->ctx_blk_mapping[i]);
550 if (bp->ctx_blk[i] == NULL)
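/* Mirror the driver's view of the link into the shared memory word
 * read by the bootcode, so management firmware reports the same
 * speed/duplex and autonegotiation state.
 */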
562 bnx2_report_fw_link(struct bnx2 *bp)
564 u32 fw_link_status = 0;
566 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
572 switch (bp->line_speed) {
574 if (bp->duplex == DUPLEX_HALF)
575 fw_link_status = BNX2_LINK_STATUS_10HALF;
577 fw_link_status = BNX2_LINK_STATUS_10FULL;
580 if (bp->duplex == DUPLEX_HALF)
581 fw_link_status = BNX2_LINK_STATUS_100HALF;
583 fw_link_status = BNX2_LINK_STATUS_100FULL;
586 if (bp->duplex == DUPLEX_HALF)
587 fw_link_status = BNX2_LINK_STATUS_1000HALF;
589 fw_link_status = BNX2_LINK_STATUS_1000FULL;
592 if (bp->duplex == DUPLEX_HALF)
593 fw_link_status = BNX2_LINK_STATUS_2500HALF;
595 fw_link_status = BNX2_LINK_STATUS_2500FULL;
599 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
602 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
604 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
605 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
607 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
608 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
609 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
611 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
615 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
617 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
621 bnx2_xceiver_str(struct bnx2 *bp)
623 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
624 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
629 bnx2_report_link(struct bnx2 *bp)
632 netif_carrier_on(bp->dev);
633 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
634 bnx2_xceiver_str(bp));
636 printk("%d Mbps ", bp->line_speed);
638 if (bp->duplex == DUPLEX_FULL)
639 printk("full duplex");
641 printk("half duplex");
644 if (bp->flow_ctrl & FLOW_CTRL_RX) {
645 printk(", receive ");
646 if (bp->flow_ctrl & FLOW_CTRL_TX)
647 printk("& transmit ");
650 printk(", transmit ");
652 printk("flow control ON");
657 netif_carrier_off(bp->dev);
658 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
659 bnx2_xceiver_str(bp));
662 bnx2_report_fw_link(bp);
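/* Resolve rx/tx flow control from the local and partner pause
 * advertisements per the resolution rules of IEEE 802.3 Annex 28B.
 * 1000Base-X pause bits are first mapped onto the copper-style bits
 * so a single resolution path serves both media.
 */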
666 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
668 u32 local_adv, remote_adv;
671 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
672 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
674 if (bp->duplex == DUPLEX_FULL) {
675 bp->flow_ctrl = bp->req_flow_ctrl;
680 if (bp->duplex != DUPLEX_FULL) {
684 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
685 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
688 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
689 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
690 bp->flow_ctrl |= FLOW_CTRL_TX;
691 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
692 bp->flow_ctrl |= FLOW_CTRL_RX;
696 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
697 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
699 if (bp->phy_flags & PHY_SERDES_FLAG) {
700 u32 new_local_adv = 0;
701 u32 new_remote_adv = 0;
703 if (local_adv & ADVERTISE_1000XPAUSE)
704 new_local_adv |= ADVERTISE_PAUSE_CAP;
705 if (local_adv & ADVERTISE_1000XPSE_ASYM)
706 new_local_adv |= ADVERTISE_PAUSE_ASYM;
707 if (remote_adv & ADVERTISE_1000XPAUSE)
708 new_remote_adv |= ADVERTISE_PAUSE_CAP;
709 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
710 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
712 local_adv = new_local_adv;
713 remote_adv = new_remote_adv;
716 /* See Table 28B-3 of 802.3ab-1999 spec. */
717 if (local_adv & ADVERTISE_PAUSE_CAP) {
718 if(local_adv & ADVERTISE_PAUSE_ASYM) {
719 if (remote_adv & ADVERTISE_PAUSE_CAP) {
720 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
722 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
723 bp->flow_ctrl = FLOW_CTRL_RX;
727 if (remote_adv & ADVERTISE_PAUSE_CAP) {
728 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
732 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
733 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
734 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
736 bp->flow_ctrl = FLOW_CTRL_TX;
742 bnx2_5709s_linkup(struct bnx2 *bp)
748 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
749 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
750 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
752 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
753 bp->line_speed = bp->req_line_speed;
754 bp->duplex = bp->req_duplex;
757 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
759 case MII_BNX2_GP_TOP_AN_SPEED_10:
760 bp->line_speed = SPEED_10;
762 case MII_BNX2_GP_TOP_AN_SPEED_100:
763 bp->line_speed = SPEED_100;
765 case MII_BNX2_GP_TOP_AN_SPEED_1G:
766 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
767 bp->line_speed = SPEED_1000;
769 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
770 bp->line_speed = SPEED_2500;
773 if (val & MII_BNX2_GP_TOP_AN_FD)
774 bp->duplex = DUPLEX_FULL;
776 bp->duplex = DUPLEX_HALF;
781 bnx2_5708s_linkup(struct bnx2 *bp)
786 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
787 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
788 case BCM5708S_1000X_STAT1_SPEED_10:
789 bp->line_speed = SPEED_10;
791 case BCM5708S_1000X_STAT1_SPEED_100:
792 bp->line_speed = SPEED_100;
794 case BCM5708S_1000X_STAT1_SPEED_1G:
795 bp->line_speed = SPEED_1000;
797 case BCM5708S_1000X_STAT1_SPEED_2G5:
798 bp->line_speed = SPEED_2500;
801 if (val & BCM5708S_1000X_STAT1_FD)
802 bp->duplex = DUPLEX_FULL;
804 bp->duplex = DUPLEX_HALF;
810 bnx2_5706s_linkup(struct bnx2 *bp)
812 u32 bmcr, local_adv, remote_adv, common;
815 bp->line_speed = SPEED_1000;
817 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
818 if (bmcr & BMCR_FULLDPLX) {
819 bp->duplex = DUPLEX_FULL;
822 bp->duplex = DUPLEX_HALF;
825 if (!(bmcr & BMCR_ANENABLE)) {
829 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
830 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
832 common = local_adv & remote_adv;
833 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
835 if (common & ADVERTISE_1000XFULL) {
836 bp->duplex = DUPLEX_FULL;
839 bp->duplex = DUPLEX_HALF;
847 bnx2_copper_linkup(struct bnx2 *bp)
851 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
852 if (bmcr & BMCR_ANENABLE) {
853 u32 local_adv, remote_adv, common;
855 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
856 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
858 common = local_adv & (remote_adv >> 2);
859 if (common & ADVERTISE_1000FULL) {
860 bp->line_speed = SPEED_1000;
861 bp->duplex = DUPLEX_FULL;
863 else if (common & ADVERTISE_1000HALF) {
864 bp->line_speed = SPEED_1000;
865 bp->duplex = DUPLEX_HALF;
868 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
869 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
871 common = local_adv & remote_adv;
872 if (common & ADVERTISE_100FULL) {
873 bp->line_speed = SPEED_100;
874 bp->duplex = DUPLEX_FULL;
876 else if (common & ADVERTISE_100HALF) {
877 bp->line_speed = SPEED_100;
878 bp->duplex = DUPLEX_HALF;
880 else if (common & ADVERTISE_10FULL) {
881 bp->line_speed = SPEED_10;
882 bp->duplex = DUPLEX_FULL;
884 else if (common & ADVERTISE_10HALF) {
885 bp->line_speed = SPEED_10;
886 bp->duplex = DUPLEX_HALF;
895 if (bmcr & BMCR_SPEED100) {
896 bp->line_speed = SPEED_100;
899 bp->line_speed = SPEED_10;
901 if (bmcr & BMCR_FULLDPLX) {
902 bp->duplex = DUPLEX_FULL;
905 bp->duplex = DUPLEX_HALF;
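/* Program the EMAC to match the resolved link: port mode for the
 * negotiated speed, half/full duplex, and the rx/tx pause enables
 * chosen by flow control resolution.
 */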
913 bnx2_set_mac_link(struct bnx2 *bp)
917 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
918 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
919 (bp->duplex == DUPLEX_HALF)) {
920 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
923 /* Configure the EMAC mode register. */
924 val = REG_RD(bp, BNX2_EMAC_MODE);
926 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
927 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
928 BNX2_EMAC_MODE_25G_MODE);
931 switch (bp->line_speed) {
933 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
934 val |= BNX2_EMAC_MODE_PORT_MII_10M;
939 val |= BNX2_EMAC_MODE_PORT_MII;
942 val |= BNX2_EMAC_MODE_25G_MODE;
945 val |= BNX2_EMAC_MODE_PORT_GMII;
950 val |= BNX2_EMAC_MODE_PORT_GMII;
953 /* Set the MAC to operate in the appropriate duplex mode. */
954 if (bp->duplex == DUPLEX_HALF)
955 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
956 REG_WR(bp, BNX2_EMAC_MODE, val);
958 /* Enable/disable rx PAUSE. */
959 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
961 if (bp->flow_ctrl & FLOW_CTRL_RX)
962 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
963 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
965 /* Enable/disable tx PAUSE. */
966 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
967 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
969 if (bp->flow_ctrl & FLOW_CTRL_TX)
970 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
971 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
973 /* Acknowledge the interrupt. */
974 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
980 bnx2_enable_bmsr1(struct bnx2 *bp)
982 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
983 (CHIP_NUM(bp) == CHIP_NUM_5709))
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
985 MII_BNX2_BLK_ADDR_GP_STATUS);
989 bnx2_disable_bmsr1(struct bnx2 *bp)
991 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
992 (CHIP_NUM(bp) == CHIP_NUM_5709))
993 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
994 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
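/* Make sure 2.5G advertisement is enabled in the Over-1G register on
 * 2.5G-capable PHYs.  The return value tells the caller whether the
 * advertisement was already in place or had to be changed.
 */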
998 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1003 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1006 if (bp->autoneg & AUTONEG_SPEED)
1007 bp->advertising |= ADVERTISED_2500baseX_Full;
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (!(up1 & BCM5708S_UP1_2G5)) {
1014 up1 |= BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1027 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1032 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1035 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1036 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1038 bnx2_read_phy(bp, bp->mii_up1, &up1);
1039 if (up1 & BCM5708S_UP1_2G5) {
1040 up1 &= ~BCM5708S_UP1_2G5;
1041 bnx2_write_phy(bp, bp->mii_up1, up1);
1045 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1046 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1047 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1053 bnx2_enable_forced_2g5(struct bnx2 *bp)
1057 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1060 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1063 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1064 MII_BNX2_BLK_ADDR_SERDES_DIG);
1065 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1066 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1067 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1068 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1070 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1071 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1072 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1074 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1075 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1076 bmcr |= BCM5708S_BMCR_FORCE_2500;
1079 if (bp->autoneg & AUTONEG_SPEED) {
1080 bmcr &= ~BMCR_ANENABLE;
1081 if (bp->req_duplex == DUPLEX_FULL)
1082 bmcr |= BMCR_FULLDPLX;
1084 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1088 bnx2_disable_forced_2g5(struct bnx2 *bp)
1092 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1099 MII_BNX2_BLK_ADDR_SERDES_DIG);
1100 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1101 val &= ~MII_BNX2_SD_MISC1_FORCE;
1102 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1104 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1105 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1106 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1108 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1113 if (bp->autoneg & AUTONEG_SPEED)
1114 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1115 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
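/* Re-evaluate the link after an interrupt or configuration change:
 * read the (latched) status register twice for the current state,
 * derive speed/duplex per chip family, resolve flow control, and
 * reprogram the MAC.
 */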
1119 bnx2_set_link(struct bnx2 *bp)
1124 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1129 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1132 link_up = bp->link_up;
1134 bnx2_enable_bmsr1(bp);
1135 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1136 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1137 bnx2_disable_bmsr1(bp);
1139 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1140 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1143 val = REG_RD(bp, BNX2_EMAC_STATUS);
1144 if (val & BNX2_EMAC_STATUS_LINK)
1145 bmsr |= BMSR_LSTATUS;
1147 bmsr &= ~BMSR_LSTATUS;
1150 if (bmsr & BMSR_LSTATUS) {
1153 if (bp->phy_flags & PHY_SERDES_FLAG) {
1154 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1155 bnx2_5706s_linkup(bp);
1156 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1157 bnx2_5708s_linkup(bp);
1158 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1159 bnx2_5709s_linkup(bp);
1162 bnx2_copper_linkup(bp);
1164 bnx2_resolve_flow_ctrl(bp);
1167 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1168 (bp->autoneg & AUTONEG_SPEED))
1169 bnx2_disable_forced_2g5(bp);
1171 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1175 if (bp->link_up != link_up) {
1176 bnx2_report_link(bp);
1179 bnx2_set_mac_link(bp);
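/* Soft-reset the PHY via BMCR and poll until the self-clearing reset
 * bit drops, giving up after PHY_RESET_MAX_WAIT iterations.
 */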
1185 bnx2_reset_phy(struct bnx2 *bp)
1190 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1192 #define PHY_RESET_MAX_WAIT 100
1193 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1196 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1197 if (!(reg & BMCR_RESET)) {
1202 if (i == PHY_RESET_MAX_WAIT) {
1209 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1213 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1214 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1216 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217 adv = ADVERTISE_1000XPAUSE;
1220 adv = ADVERTISE_PAUSE_CAP;
1223 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 adv = ADVERTISE_1000XPSE_ASYM;
1228 adv = ADVERTISE_PAUSE_ASYM;
1231 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1232 if (bp->phy_flags & PHY_SERDES_FLAG) {
1233 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1236 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1242 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1245 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1247 u32 speed_arg = 0, pause_adv;
1249 pause_adv = bnx2_phy_get_pause_adv(bp);
1251 if (bp->autoneg & AUTONEG_SPEED) {
1252 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1253 if (bp->advertising & ADVERTISED_10baseT_Half)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1255 if (bp->advertising & ADVERTISED_10baseT_Full)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1257 if (bp->advertising & ADVERTISED_100baseT_Half)
1258 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1259 if (bp->advertising & ADVERTISED_100baseT_Full)
1260 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261 if (bp->advertising & ADVERTISED_1000baseT_Full)
1262 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1263 if (bp->advertising & ADVERTISED_2500baseX_Full)
1264 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1266 if (bp->req_line_speed == SPEED_2500)
1267 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1268 else if (bp->req_line_speed == SPEED_1000)
1269 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1270 else if (bp->req_line_speed == SPEED_100) {
1271 if (bp->req_duplex == DUPLEX_FULL)
1272 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1274 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1275 } else if (bp->req_line_speed == SPEED_10) {
1276 if (bp->req_duplex == DUPLEX_FULL)
1277 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1279 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1283 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1284 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1285 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1286 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1288 if (port == PORT_TP)
1289 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1290 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1292 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1294 spin_unlock_bh(&bp->phy_lock);
1295 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1296 spin_lock_bh(&bp->phy_lock);
1302 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1307 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1308 return (bnx2_setup_remote_phy(bp, port));
1310 if (!(bp->autoneg & AUTONEG_SPEED)) {
1312 int force_link_down = 0;
1314 if (bp->req_line_speed == SPEED_2500) {
1315 if (!bnx2_test_and_enable_2g5(bp))
1316 force_link_down = 1;
1317 } else if (bp->req_line_speed == SPEED_1000) {
1318 if (bnx2_test_and_disable_2g5(bp))
1319 force_link_down = 1;
1321 bnx2_read_phy(bp, bp->mii_adv, &adv);
1322 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1324 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1325 new_bmcr = bmcr & ~BMCR_ANENABLE;
1326 new_bmcr |= BMCR_SPEED1000;
1328 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1329 if (bp->req_line_speed == SPEED_2500)
1330 bnx2_enable_forced_2g5(bp);
1331 else if (bp->req_line_speed == SPEED_1000) {
1332 bnx2_disable_forced_2g5(bp);
1333 new_bmcr &= ~0x2000;
1336 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1337 if (bp->req_line_speed == SPEED_2500)
1338 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1340 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1343 if (bp->req_duplex == DUPLEX_FULL) {
1344 adv |= ADVERTISE_1000XFULL;
1345 new_bmcr |= BMCR_FULLDPLX;
1348 adv |= ADVERTISE_1000XHALF;
1349 new_bmcr &= ~BMCR_FULLDPLX;
1351 if ((new_bmcr != bmcr) || (force_link_down)) {
1352 /* Force a link down visible on the other side */
1354 bnx2_write_phy(bp, bp->mii_adv, adv &
1355 ~(ADVERTISE_1000XFULL |
1356 ADVERTISE_1000XHALF));
1357 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1358 BMCR_ANRESTART | BMCR_ANENABLE);
1361 netif_carrier_off(bp->dev);
1362 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1363 bnx2_report_link(bp);
1365 bnx2_write_phy(bp, bp->mii_adv, adv);
1366 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1368 bnx2_resolve_flow_ctrl(bp);
1369 bnx2_set_mac_link(bp);
1374 bnx2_test_and_enable_2g5(bp);
1376 if (bp->advertising & ADVERTISED_1000baseT_Full)
1377 new_adv |= ADVERTISE_1000XFULL;
1379 new_adv |= bnx2_phy_get_pause_adv(bp);
1381 bnx2_read_phy(bp, bp->mii_adv, &adv);
1382 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1384 bp->serdes_an_pending = 0;
1385 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1386 /* Force a link down visible on the other side */
1388 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1389 spin_unlock_bh(&bp->phy_lock);
1391 spin_lock_bh(&bp->phy_lock);
1394 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1395 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1397 /* Speed up link-up time when the link partner
1398 * does not autonegotiate, which is very common
1399 * in blade servers. Some blade servers use
1400 * IPMI for keyboard input and it's important
1401 * to minimize link disruptions. Autonegotiation involves
1402 * exchanging base pages plus 3 next pages and
1403 * normally completes in about 120 msec.
 */
1405 bp->current_interval = SERDES_AN_TIMEOUT;
1406 bp->serdes_an_pending = 1;
1407 mod_timer(&bp->timer, jiffies + bp->current_interval);
1409 bnx2_resolve_flow_ctrl(bp);
1410 bnx2_set_mac_link(bp);
1416 #define ETHTOOL_ALL_FIBRE_SPEED \
1417 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1418 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1419 (ADVERTISED_1000baseT_Full)
1421 #define ETHTOOL_ALL_COPPER_SPEED \
1422 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1423 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1424 ADVERTISED_1000baseT_Full)
1426 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1427 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1429 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1432 bnx2_set_default_remote_link(struct bnx2 *bp)
1436 if (bp->phy_port == PORT_TP)
1437 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1439 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1441 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1442 bp->req_line_speed = 0;
1443 bp->autoneg |= AUTONEG_SPEED;
1444 bp->advertising = ADVERTISED_Autoneg;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1446 bp->advertising |= ADVERTISED_10baseT_Half;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1448 bp->advertising |= ADVERTISED_10baseT_Full;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1450 bp->advertising |= ADVERTISED_100baseT_Half;
1451 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1452 bp->advertising |= ADVERTISED_100baseT_Full;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1454 bp->advertising |= ADVERTISED_1000baseT_Full;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1456 bp->advertising |= ADVERTISED_2500baseX_Full;
1459 bp->advertising = 0;
1460 bp->req_duplex = DUPLEX_FULL;
1461 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1462 bp->req_line_speed = SPEED_10;
1463 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1464 bp->req_duplex = DUPLEX_HALF;
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1467 bp->req_line_speed = SPEED_100;
1468 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1469 bp->req_duplex = DUPLEX_HALF;
1471 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1472 bp->req_line_speed = SPEED_1000;
1473 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1474 bp->req_line_speed = SPEED_2500;
1479 bnx2_set_default_link(struct bnx2 *bp)
1481 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1482 return bnx2_set_default_remote_link(bp);
1484 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1485 bp->req_line_speed = 0;
1486 if (bp->phy_flags & PHY_SERDES_FLAG) {
1489 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1491 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1492 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1493 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1495 bp->req_line_speed = bp->line_speed = SPEED_1000;
1496 bp->req_duplex = DUPLEX_FULL;
1499 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1503 bnx2_send_heart_beat(struct bnx2 *bp)
1508 spin_lock(&bp->indirect_lock);
1509 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1510 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1511 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1512 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1513 spin_unlock(&bp->indirect_lock);
1517 bnx2_remote_phy_event(struct bnx2 *bp)
1520 u8 link_up = bp->link_up;
1523 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1525 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1526 bnx2_send_heart_beat(bp);
1528 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1530 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1536 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1537 bp->duplex = DUPLEX_FULL;
1539 case BNX2_LINK_STATUS_10HALF:
1540 bp->duplex = DUPLEX_HALF;
1541 case BNX2_LINK_STATUS_10FULL:
1542 bp->line_speed = SPEED_10;
1544 case BNX2_LINK_STATUS_100HALF:
1545 bp->duplex = DUPLEX_HALF;
1546 case BNX2_LINK_STATUS_100BASE_T4:
1547 case BNX2_LINK_STATUS_100FULL:
1548 bp->line_speed = SPEED_100;
1550 case BNX2_LINK_STATUS_1000HALF:
1551 bp->duplex = DUPLEX_HALF;
1552 case BNX2_LINK_STATUS_1000FULL:
1553 bp->line_speed = SPEED_1000;
1555 case BNX2_LINK_STATUS_2500HALF:
1556 bp->duplex = DUPLEX_HALF;
1557 case BNX2_LINK_STATUS_2500FULL:
1558 bp->line_speed = SPEED_2500;
1565 spin_lock(&bp->phy_lock);
1567 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1568 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1569 if (bp->duplex == DUPLEX_FULL)
1570 bp->flow_ctrl = bp->req_flow_ctrl;
1572 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1573 bp->flow_ctrl |= FLOW_CTRL_TX;
1574 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1575 bp->flow_ctrl |= FLOW_CTRL_RX;
1578 old_port = bp->phy_port;
1579 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1580 bp->phy_port = PORT_FIBRE;
1582 bp->phy_port = PORT_TP;
1584 if (old_port != bp->phy_port)
1585 bnx2_set_default_link(bp);
1587 spin_unlock(&bp->phy_lock);
1589 if (bp->link_up != link_up)
1590 bnx2_report_link(bp);
1592 bnx2_set_mac_link(bp);
1596 bnx2_set_remote_link(struct bnx2 *bp)
1600 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1602 case BNX2_FW_EVT_CODE_LINK_EVENT:
1603 bnx2_remote_phy_event(bp);
1605 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1607 bnx2_send_heart_beat(bp);
1614 bnx2_setup_copper_phy(struct bnx2 *bp)
1619 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1621 if (bp->autoneg & AUTONEG_SPEED) {
1622 u32 adv_reg, adv1000_reg;
1623 u32 new_adv_reg = 0;
1624 u32 new_adv1000_reg = 0;
1626 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1627 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1628 ADVERTISE_PAUSE_ASYM);
1630 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1631 adv1000_reg &= PHY_ALL_1000_SPEED;
1633 if (bp->advertising & ADVERTISED_10baseT_Half)
1634 new_adv_reg |= ADVERTISE_10HALF;
1635 if (bp->advertising & ADVERTISED_10baseT_Full)
1636 new_adv_reg |= ADVERTISE_10FULL;
1637 if (bp->advertising & ADVERTISED_100baseT_Half)
1638 new_adv_reg |= ADVERTISE_100HALF;
1639 if (bp->advertising & ADVERTISED_100baseT_Full)
1640 new_adv_reg |= ADVERTISE_100FULL;
1641 if (bp->advertising & ADVERTISED_1000baseT_Full)
1642 new_adv1000_reg |= ADVERTISE_1000FULL;
1644 new_adv_reg |= ADVERTISE_CSMA;
1646 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1648 if ((adv1000_reg != new_adv1000_reg) ||
1649 (adv_reg != new_adv_reg) ||
1650 ((bmcr & BMCR_ANENABLE) == 0)) {
1652 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1653 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1654 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1657 else if (bp->link_up) {
1658 /* Flow ctrl may have changed from auto to forced */
1659 /* or vice-versa. */
1661 bnx2_resolve_flow_ctrl(bp);
1662 bnx2_set_mac_link(bp);
1668 if (bp->req_line_speed == SPEED_100) {
1669 new_bmcr |= BMCR_SPEED100;
1671 if (bp->req_duplex == DUPLEX_FULL) {
1672 new_bmcr |= BMCR_FULLDPLX;
1674 if (new_bmcr != bmcr) {
1677 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1678 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1680 if (bmsr & BMSR_LSTATUS) {
1681 /* Force link down */
1682 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1683 spin_unlock_bh(&bp->phy_lock);
1685 spin_lock_bh(&bp->phy_lock);
1687 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1688 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1691 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1693 /* Normally, the new speed is set up after the link has
1694 * gone down and up again. In some cases, link will not go
1695 * down so we need to set up the new speed here.
 */
1697 if (bmsr & BMSR_LSTATUS) {
1698 bp->line_speed = bp->req_line_speed;
1699 bp->duplex = bp->req_duplex;
1700 bnx2_resolve_flow_ctrl(bp);
1701 bnx2_set_mac_link(bp);
1704 bnx2_resolve_flow_ctrl(bp);
1705 bnx2_set_mac_link(bp);
1711 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1713 if (bp->loopback == MAC_LOOPBACK)
1716 if (bp->phy_flags & PHY_SERDES_FLAG) {
1717 return (bnx2_setup_serdes_phy(bp, port));
1720 return (bnx2_setup_copper_phy(bp));
1725 bnx2_init_5709s_phy(struct bnx2 *bp)
1729 bp->mii_bmcr = MII_BMCR + 0x10;
1730 bp->mii_bmsr = MII_BMSR + 0x10;
1731 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1732 bp->mii_adv = MII_ADVERTISE + 0x10;
1733 bp->mii_lpa = MII_LPA + 0x10;
1734 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1737 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1739 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1742 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1744 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1745 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1746 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1747 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1750 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1751 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1752 val |= BCM5708S_UP1_2G5;
1754 val &= ~BCM5708S_UP1_2G5;
1755 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1757 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1758 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1759 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1760 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1762 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1764 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1765 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1766 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1768 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1774 bnx2_init_5708s_phy(struct bnx2 *bp)
1780 bp->mii_up1 = BCM5708S_UP1;
1782 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1783 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1784 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1786 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1787 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1788 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1790 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1791 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1792 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1794 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1795 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1796 val |= BCM5708S_UP1_2G5;
1797 bnx2_write_phy(bp, BCM5708S_UP1, val);
1800 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1801 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1802 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1803 /* increase tx signal amplitude */
1804 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1805 BCM5708S_BLK_ADDR_TX_MISC);
1806 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1807 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1808 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1809 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1812 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1813 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1818 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1819 BNX2_SHARED_HW_CFG_CONFIG);
1820 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1822 BCM5708S_BLK_ADDR_TX_MISC);
1823 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1824 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1825 BCM5708S_BLK_ADDR_DIG);
1832 bnx2_init_5706s_phy(struct bnx2 *bp)
1836 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1838 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1839 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1841 if (bp->dev->mtu > 1500) {
1844 /* Set extended packet length bit */
1845 bnx2_write_phy(bp, 0x18, 0x7);
1846 bnx2_read_phy(bp, 0x18, &val);
1847 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1849 bnx2_write_phy(bp, 0x1c, 0x6c00);
1850 bnx2_read_phy(bp, 0x1c, &val);
1851 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1856 bnx2_write_phy(bp, 0x18, 0x7);
1857 bnx2_read_phy(bp, 0x18, &val);
1858 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1860 bnx2_write_phy(bp, 0x1c, 0x6c00);
1861 bnx2_read_phy(bp, 0x1c, &val);
1862 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1869 bnx2_init_copper_phy(struct bnx2 *bp)
1875 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1876 bnx2_write_phy(bp, 0x18, 0x0c00);
1877 bnx2_write_phy(bp, 0x17, 0x000a);
1878 bnx2_write_phy(bp, 0x15, 0x310b);
1879 bnx2_write_phy(bp, 0x17, 0x201f);
1880 bnx2_write_phy(bp, 0x15, 0x9506);
1881 bnx2_write_phy(bp, 0x17, 0x401f);
1882 bnx2_write_phy(bp, 0x15, 0x14e2);
1883 bnx2_write_phy(bp, 0x18, 0x0400);
1886 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1887 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1888 MII_BNX2_DSP_EXPAND_REG | 0x8);
1889 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1891 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1894 if (bp->dev->mtu > 1500) {
1895 /* Set extended packet length bit */
1896 bnx2_write_phy(bp, 0x18, 0x7);
1897 bnx2_read_phy(bp, 0x18, &val);
1898 bnx2_write_phy(bp, 0x18, val | 0x4000);
1900 bnx2_read_phy(bp, 0x10, &val);
1901 bnx2_write_phy(bp, 0x10, val | 0x1);
1904 bnx2_write_phy(bp, 0x18, 0x7);
1905 bnx2_read_phy(bp, 0x18, &val);
1906 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1908 bnx2_read_phy(bp, 0x10, &val);
1909 bnx2_write_phy(bp, 0x10, val & ~0x1);
1912 /* ethernet@wirespeed */
1913 bnx2_write_phy(bp, 0x18, 0x7007);
1914 bnx2_read_phy(bp, 0x18, &val);
1915 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1921 bnx2_init_phy(struct bnx2 *bp)
1926 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1927 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1929 bp->mii_bmcr = MII_BMCR;
1930 bp->mii_bmsr = MII_BMSR;
1931 bp->mii_bmsr1 = MII_BMSR;
1932 bp->mii_adv = MII_ADVERTISE;
1933 bp->mii_lpa = MII_LPA;
1935 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1937 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1940 bnx2_read_phy(bp, MII_PHYSID1, &val);
1941 bp->phy_id = val << 16;
1942 bnx2_read_phy(bp, MII_PHYSID2, &val);
1943 bp->phy_id |= val & 0xffff;
1945 if (bp->phy_flags & PHY_SERDES_FLAG) {
1946 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1947 rc = bnx2_init_5706s_phy(bp);
1948 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1949 rc = bnx2_init_5708s_phy(bp);
1950 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1951 rc = bnx2_init_5709s_phy(bp);
1954 rc = bnx2_init_copper_phy(bp);
1959 rc = bnx2_setup_phy(bp, bp->phy_port);
1965 bnx2_set_mac_loopback(struct bnx2 *bp)
1969 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1970 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1971 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1972 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1977 static int bnx2_test_link(struct bnx2 *);
1980 bnx2_set_phy_loopback(struct bnx2 *bp)
1985 spin_lock_bh(&bp->phy_lock);
1986 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1988 spin_unlock_bh(&bp->phy_lock);
1992 for (i = 0; i < 10; i++) {
1993 if (bnx2_test_link(bp) == 0)
1998 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1999 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2000 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2001 BNX2_EMAC_MODE_25G_MODE);
2003 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2004 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
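/* Post a command to the bootcode mailbox and poll the firmware
 * mailbox for an acknowledgement carrying the same sequence number.
 * On timeout the firmware is informed with a FW_TIMEOUT message;
 * 'silent' suppresses the error printout.
 */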
2010 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2016 msg_data |= bp->fw_wr_seq;
2018 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2020 /* wait for an acknowledgement. */
2021 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2024 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2026 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2029 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2032 /* If we timed out, inform the firmware that this is the case. */
2033 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2035 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2038 msg_data &= ~BNX2_DRV_MSG_CODE;
2039 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2041 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2046 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
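/* Set up the 5709's host-resident context memory: wait for the
 * on-chip context initialization to finish, then program one
 * page-table entry per context page, polling each write request for
 * completion.
 */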
2053 bnx2_init_5709_context(struct bnx2 *bp)
2058 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2059 val |= (BCM_PAGE_BITS - 8) << 16;
2060 REG_WR(bp, BNX2_CTX_COMMAND, val);
2061 for (i = 0; i < 10; i++) {
2062 val = REG_RD(bp, BNX2_CTX_COMMAND);
2063 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2067 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2070 for (i = 0; i < bp->ctx_pages; i++) {
2073 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2074 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2075 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2076 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2077 (u64) bp->ctx_blk_mapping[i] >> 32);
2078 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2079 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2080 for (j = 0; j < 10; j++) {
2082 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2083 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2087 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2096 bnx2_init_context(struct bnx2 *bp)
2102 u32 vcid_addr, pcid_addr, offset;
2107 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2110 vcid_addr = GET_PCID_ADDR(vcid);
2112 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2117 pcid_addr = GET_PCID_ADDR(new_vcid);
2120 vcid_addr = GET_CID_ADDR(vcid);
2121 pcid_addr = vcid_addr;
2124 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2125 vcid_addr += (i << PHY_CTX_SHIFT);
2126 pcid_addr += (i << PHY_CTX_SHIFT);
2128 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2129 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2131 /* Zero out the context. */
2132 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2133 CTX_WR(bp, 0x00, offset, 0);
2135 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2136 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
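/* Workaround for bad on-chip rx buffer memory on 5706 A0 silicon:
 * allocate every free mbuf cluster, remember the good ones (bit 9 of
 * the address clear), then free only those back so the bad blocks
 * stay out of circulation.
 */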
2142 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2148 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2149 if (good_mbuf == NULL) {
2150 printk(KERN_ERR PFX "Failed to allocate memory in "
2151 "bnx2_alloc_bad_rbuf\n");
2155 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2156 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2160 /* Allocate a bunch of mbufs and save the good ones in an array. */
2161 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2162 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2163 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2165 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2167 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2169 /* The addresses with Bit 9 set are bad memory blocks. */
2170 if (!(val & (1 << 9))) {
2171 good_mbuf[good_mbuf_cnt] = (u16) val;
2175 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2178 /* Free the good ones back to the mbuf pool thus discarding
2179 * all the bad ones. */
2180 while (good_mbuf_cnt) {
2183 val = good_mbuf[good_mbuf_cnt];
2184 val = (val << 9) | val | 1;
2186 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2193 bnx2_set_mac_addr(struct bnx2 *bp)
2196 u8 *mac_addr = bp->dev->dev_addr;
2198 val = (mac_addr[0] << 8) | mac_addr[1];
2200 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2202 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2203 (mac_addr[4] << 8) | mac_addr[5];
2205 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
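/* Attach a fresh skb to rx ring slot 'index': align the buffer, map
 * it for DMA and publish the bus address in the rx descriptor.
 */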
2209 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2211 struct sk_buff *skb;
2212 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2214 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2215 unsigned long align;
2217 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2222 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2223 skb_reserve(skb, BNX2_RX_ALIGN - align);
2225 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2226 PCI_DMA_FROMDEVICE);
2229 pci_unmap_addr_set(rx_buf, mapping, mapping);
2231 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2232 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2234 bp->rx_prod_bseq += bp->rx_buf_use_size;
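/* Return whether the given attention event changed state, comparing
 * the raw and acked attention bits and acknowledging the change so
 * the next comparison starts clean.
 */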
2240 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2242 struct status_block *sblk = bp->status_blk;
2243 u32 new_link_state, old_link_state;
2246 new_link_state = sblk->status_attn_bits & event;
2247 old_link_state = sblk->status_attn_bits_ack & event;
2248 if (new_link_state != old_link_state) {
2250 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2252 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2260 bnx2_phy_int(struct bnx2 *bp)
2262 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2263 spin_lock(&bp->phy_lock);
2265 spin_unlock(&bp->phy_lock);
2267 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2268 bnx2_set_remote_link(bp);
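/* Reclaim completed tx buffers up to the hardware consumer index,
 * unmapping DMA and freeing skbs, then wake the queue once enough
 * descriptors are free again.
 */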
2273 bnx2_tx_int(struct bnx2 *bp)
2275 struct status_block *sblk = bp->status_blk;
2276 u16 hw_cons, sw_cons, sw_ring_cons;
2279 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2280 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2283 sw_cons = bp->tx_cons;
2285 while (sw_cons != hw_cons) {
2286 struct sw_bd *tx_buf;
2287 struct sk_buff *skb;
2290 sw_ring_cons = TX_RING_IDX(sw_cons);
2292 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2295 /* partial BD completions possible with TSO packets */
2296 if (skb_is_gso(skb)) {
2297 u16 last_idx, last_ring_idx;
2299 last_idx = sw_cons +
2300 skb_shinfo(skb)->nr_frags + 1;
2301 last_ring_idx = sw_ring_cons +
2302 skb_shinfo(skb)->nr_frags + 1;
2303 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2306 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2311 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2312 skb_headlen(skb), PCI_DMA_TODEVICE);
2315 last = skb_shinfo(skb)->nr_frags;
2317 for (i = 0; i < last; i++) {
2318 sw_cons = NEXT_TX_BD(sw_cons);
2320 pci_unmap_page(bp->pdev,
2322 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2324 skb_shinfo(skb)->frags[i].size,
2328 sw_cons = NEXT_TX_BD(sw_cons);
2330 tx_free_bd += last + 1;
2334 hw_cons = bp->hw_tx_cons =
2335 sblk->status_tx_quick_consumer_index0;
2337 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2342 bp->tx_cons = sw_cons;
2343 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2344 * before checking for netif_queue_stopped(). Without the
2345 * memory barrier, there is a small possibility that bnx2_start_xmit()
2346 * will miss it and cause the queue to be stopped forever.
 */
smp_mb();
2350 if (unlikely(netif_queue_stopped(bp->dev)) &&
2351 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2352 netif_tx_lock(bp->dev);
2353 if ((netif_queue_stopped(bp->dev)) &&
2354 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2355 netif_wake_queue(bp->dev);
2356 netif_tx_unlock(bp->dev);
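/* Recycle an rx buffer: move the skb and its DMA mapping from the
 * consumer slot to the producer slot so the hardware can refill it
 * without a new allocation.
 */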
2361 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2364 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2365 struct rx_bd *cons_bd, *prod_bd;
2367 cons_rx_buf = &bp->rx_buf_ring[cons];
2368 prod_rx_buf = &bp->rx_buf_ring[prod];
2370 pci_dma_sync_single_for_device(bp->pdev,
2371 pci_unmap_addr(cons_rx_buf, mapping),
2372 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2374 bp->rx_prod_bseq += bp->rx_buf_use_size;
2376 prod_rx_buf->skb = skb;
2381 pci_unmap_addr_set(prod_rx_buf, mapping,
2382 pci_unmap_addr(cons_rx_buf, mapping));
2384 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2385 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2386 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2387 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
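/* Process received packets up to the hardware consumer index or the
 * NAPI budget, whichever is hit first, recycling or replenishing an
 * rx buffer for every descriptor consumed.
 */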
2391 bnx2_rx_int(struct bnx2 *bp, int budget)
2393 struct status_block *sblk = bp->status_blk;
2394 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2395 struct l2_fhdr *rx_hdr;
2398 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2399 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2402 sw_cons = bp->rx_cons;
2403 sw_prod = bp->rx_prod;
2405 /* Memory barrier necessary as speculative reads of the rx
2406 * buffer can be ahead of the index in the status block.
 */
rmb();
2409 while (sw_cons != hw_cons) {
2412 struct sw_bd *rx_buf;
2413 struct sk_buff *skb;
2414 dma_addr_t dma_addr;
2416 sw_ring_cons = RX_RING_IDX(sw_cons);
2417 sw_ring_prod = RX_RING_IDX(sw_prod);
2419 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2424 dma_addr = pci_unmap_addr(rx_buf, mapping);
2426 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2427 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2429 rx_hdr = (struct l2_fhdr *) skb->data;
2430 len = rx_hdr->l2_fhdr_pkt_len - 4;
2432 if ((status = rx_hdr->l2_fhdr_status) &
2433 (L2_FHDR_ERRORS_BAD_CRC |
2434 L2_FHDR_ERRORS_PHY_DECODE |
2435 L2_FHDR_ERRORS_ALIGNMENT |
2436 L2_FHDR_ERRORS_TOO_SHORT |
2437 L2_FHDR_ERRORS_GIANT_FRAME)) {
2442 /* Since we don't have a jumbo ring, copy small packets
2445 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2446 struct sk_buff *new_skb;
2448 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2449 if (new_skb == NULL)
2453 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2454 new_skb->data, len + 2);
2455 skb_reserve(new_skb, 2);
2456 skb_put(new_skb, len);
2458 bnx2_reuse_rx_skb(bp, skb,
2459 sw_ring_cons, sw_ring_prod);
2463 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2464 pci_unmap_single(bp->pdev, dma_addr,
2465 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2467 skb_reserve(skb, bp->rx_offset);
2472 bnx2_reuse_rx_skb(bp, skb,
2473 sw_ring_cons, sw_ring_prod);
2477 skb->protocol = eth_type_trans(skb, bp->dev);
2479 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2480 (ntohs(skb->protocol) != 0x8100)) {
2487 skb->ip_summed = CHECKSUM_NONE;
2489 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2490 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2492 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2493 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2494 skb->ip_summed = CHECKSUM_UNNECESSARY;
2498 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2499 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2500 rx_hdr->l2_fhdr_vlan_tag);
2504 netif_receive_skb(skb);
2506 bp->dev->last_rx = jiffies;
2510 sw_cons = NEXT_RX_BD(sw_cons);
2511 sw_prod = NEXT_RX_BD(sw_prod);
2513 if ((rx_pkt == budget))
2516 /* Refresh hw_cons to see if there is new work */
2517 if (sw_cons == hw_cons) {
2518 hw_cons = bp->hw_rx_cons =
2519 sblk->status_rx_quick_consumer_index0;
2520 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2525 bp->rx_cons = sw_cons;
2526 bp->rx_prod = sw_prod;
2528 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2530 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
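
/* The loop above resolves each completed BD down one of three paths:
 * copybreak (small frame while running a jumbo MTU: copy into a fresh
 * skb and recycle the original buffer), normal (replacement buffer
 * allocated, original skb unmapped and passed up the stack), or
 * reuse_rx (receive error or allocation failure: recycle the buffer
 * and count nothing).  The two mailbox writes then publish host
 * progress to the chip: BDIDX carries the new producer index and BSEQ
 * the cumulative byte count of posted buffers that the hardware
 * expects to advance with it. */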

/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
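
/* Summary of the three handlers above: bnx2_interrupt() must first
 * decide whether a shared INTx interrupt is actually ours (status index
 * changed or INTA deasserted) and explicitly masks and deasserts the
 * line; bnx2_msi() can skip that test but still masks via INT_ACK_CMD;
 * bnx2_msi_1shot() relies on the chip's one-shot mode to hold off
 * further messages, so it only schedules NAPI.  In every case the
 * interrupt is re-armed by the INT_ACK_CMD writes at the end of
 * bnx2_poll(). */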

#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}

static int
bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
	int work_done = 0;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
		work_done = bnx2_rx_int(bp, budget);

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev, napi);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
	}

	return work_done;
}
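
/* NAPI contract: bnx2_poll() consumes at most "budget" rx packets and
 * reports how many it processed.  Only when no work remains does it
 * call netif_rx_complete() and re-arm the interrupt through
 * INT_ACK_CMD.  For INTx the status index is first written with
 * MASK_INT still set and then rewritten without it, so the index is
 * updated before the line is actually unmasked; MSI can re-arm with a
 * single write. */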

/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
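
/* Worked example of the multicast hash above, with a hypothetical
 * address: if ether_crc_le() returns a CRC whose low byte is 0xb3
 * (1011 0011b), then regidx = (0xb3 & 0xe0) >> 5 = 5 and
 * bit = 0xb3 & 0x1f = 19, so bit 19 of BNX2_EMAC_MULTICAST_HASH5 gets
 * set.  The chip hashes incoming destination addresses the same way
 * and accepts a frame when its bit is set; the filter is therefore
 * approximate, since unrelated addresses can collide into one bit. */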

/* To be moved to generic lib/ */
static int
bnx2_gunzip(void *gunzip_buf, unsigned sz, u8 *zbuf, int len)
{
	struct z_stream_s *strm;
	int rc;

	/* gzip header (1f,8b,08... 10 bytes total + possible asciz filename)
	 * is stripped
	 */
	strm = kmalloc(sizeof(*strm), GFP_KERNEL);
	if (strm == NULL)
		return -ENOMEM;
	strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (strm->workspace == NULL) {
		kfree(strm);
		return -ENOMEM;
	}

	strm->next_in = zbuf;
	strm->avail_in = len;
	strm->next_out = gunzip_buf;
	strm->avail_out = sz;

	/* Negative window bits: raw deflate stream, no zlib/gzip header. */
	rc = zlib_inflateInit2(strm, -MAX_WBITS);
	if (rc == Z_OK) {
		rc = zlib_inflate(strm, Z_FINISH);
		/* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
		if (rc == Z_STREAM_END)
			rc = sz - strm->avail_out;
		else
			rc = -EINVAL;
		zlib_inflateEnd(strm);
	} else
		rc = -EINVAL;

	kfree(strm->workspace);
	kfree(strm);

	return rc;
}
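
/* Minimal usage sketch, mirroring bnx2_init_cpus() below: decompress a
 * gzipped firmware image into a scratch buffer and use the returned
 * byte count as the image length:
 *
 *	void *buf = vmalloc(FW_BUF_SIZE);
 *	int len = bnx2_gunzip(buf, FW_BUF_SIZE, bnx2_rv2p_proc1,
 *			      sizeof(bnx2_rv2p_proc1));
 *	if (len < 0)
 *		return len;
 *
 * Because inflateInit2() is called with -MAX_WBITS, zbuf must already
 * point past any gzip header, as the comment above notes. */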

static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
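
/* Each RV2P instruction is 64 bits wide; the loop writes it as a
 * high/low pair of 32-bit register writes, so the byte offset advances
 * by 8 per instruction and (i / 8) is the instruction index latched by
 * the ADDR_CMD write. */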

static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		u32 *text;
		int j;

		text = vmalloc(FW_BUF_SIZE);
		if (!text)
			return -ENOMEM;
		rc = bnx2_gunzip(text, FW_BUF_SIZE, fw->gz_text, fw->gz_text_len);
		if (rc < 0) {
			vfree(text);
			return rc;
		}
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(text[j]));
		}
		vfree(text);
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
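
/* The firmware sections are linked at addresses in the embedded CPU's
 * own view of memory (mips_view_base, 0x8000000 for every engine
 * below); subtracting it converts a link-time section address into an
 * offset from spad_base, which the host then reaches through the
 * indirect REG_WR_IND() window one 32-bit word at a time. */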

static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc;
	void *text;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	rc = bnx2_gunzip(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
	if (rc < 0) {
		vfree(text);
		goto init_cpu_err;
	}
	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	rc = bnx2_gunzip(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
	if (rc < 0) {
		vfree(text);
		goto init_cpu_err;
	}
	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
	vfree(text);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}

init_cpu_err:
	return rc;
}
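
/* Every internal engine (RXP, TXP, TPAT, COM and, on the 5709 only,
 * CP) is brought up with the same halt/copy/start sequence in
 * load_cpu_fw(); only the register-file base addresses in cpu_reg and
 * the firmware image differ from block to block. */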

static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			/* 5706 A0 and A1 silicon need chip-specific handling
			 * before entering D3hot; the original workaround was
			 * elided in this copy of the source. */
		}
		pmcsr |= 3;	/* request D3hot */
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}

static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}

static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
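
/* Worked example of the BNX2_NV_TRANSLATE math above, assuming the
 * buffered-flash geometry from flash_table (page_size = 264 bytes,
 * page_bits = 9): linear offset 1000 lives in page 1000 / 264 = 3 at
 * byte 1000 % 264 = 208, so the device address is
 * (3 << 9) + 208 = 1744.  The translation is needed because these
 * parts address each 264-byte page on a 512-byte boundary. */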

static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;

		/* Not yet been reconfigured */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}

static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
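
/* Worked example of the alignment handling above, with hypothetical
 * arguments: a 5-byte read at offset 6 becomes offset32 = 4 with
 * pre_len = 2 (bytes 6-7 taken from the tail of the first dword), then
 * the remaining len32 = 3 is rounded up to one dword with extra = 1.
 * Two aligned dword reads therefore satisfy the request, and the final
 * byte of the second dword is discarded. */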

static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start)
			memcpy(align_buf, start, 4);
		if (align_end)
			memcpy(align_buf + len32 - 4, end, 4);
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
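
/* For non-buffered parts the loop above is a classic read-modify-write:
 * the whole page (264 bytes, matching the flash_buffer sizing above) is
 * read out, the page is erased, and it is rewritten with the caller's
 * bytes spliced in between the preserved head and tail, each page
 * transaction bracketed by the FIRST/LAST command flags.  Buffered
 * parts skip the page read and the erase entirely and just write the
 * new data. */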

static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		if (netif_running(bp->dev)) {
			val = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   val);
		}
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;
	}
}

static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction