blob: 9789f05cbc99f049cf57d4033ca35593d677a8dc [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan72fbaeb2007-05-03 13:25:32 -07003 * Copyright (c) 2004-2007 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
Michael Chanf2a4f052006-03-23 01:13:12 -080012
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
Michael Chanc86a31f2006-06-13 15:03:47 -070034#include <asm/page.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080035#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
Michael Chanf2a4f052006-03-23 01:13:12 -080042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080045#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
Michael Chan29b12172006-03-23 01:13:43 -080048#include <linux/cache.h>
Michael Chanfba9fe92006-06-12 22:21:25 -070049#include <linux/zlib.h>
Michael Chanf2a4f052006-03-23 01:13:12 -080050
Michael Chanb6016b72005-05-26 13:03:09 -070051#include "bnx2.h"
52#include "bnx2_fw.h"
Michael Chand43584c2006-11-19 14:14:35 -080053#include "bnx2_fw2.h"
Michael Chanb6016b72005-05-26 13:03:09 -070054
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
Michael Chan72fbaeb2007-05-03 13:25:32 -070057#define DRV_MODULE_VERSION "1.5.10"
58#define DRV_MODULE_RELDATE "May 1, 2007"
Michael Chanb6016b72005-05-26 13:03:09 -070059
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
Randy Dunlape19360f2006-04-10 23:22:06 -070065static const char version[] __devinitdata =
Michael Chanb6016b72005-05-26 13:03:09 -070066 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
Michael Chan05d0f1c2005-11-04 08:53:48 -080069MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
Michael Chanb6016b72005-05-26 13:03:09 -070070MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board index; used as driver_data in bnx2_pci_tbl and as the index
 * into board_info[] below, so the two tables must stay in sync.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
89
/* Human-readable adapter names, indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
104
/* PCI IDs this driver binds to; the last field (driver_data) is a
 * board_t index into board_info[].  The HP OEM entries carry specific
 * subsystem IDs and are listed before the PCI_ANY_ID catch-alls so the
 * more specific entry is matched first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
126
/* NVRAM device descriptors.  Each entry lists the strapping value,
 * three config setup words and a write1 setup word, a buffered-flash
 * flag, page bits/size, byte address mask, total size in bytes, and a
 * printable name.  NOTE(review): presumably matched against the chip's
 * flash strapping at probe time — the lookup code is outside this chunk.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
213
214MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
Michael Chane89bbf12005-08-25 15:36:58 -0700216static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217{
Michael Chan2f8af122006-08-15 01:39:10 -0700218 u32 diff;
Michael Chane89bbf12005-08-25 15:36:58 -0700219
Michael Chan2f8af122006-08-15 01:39:10 -0700220 smp_mb();
Michael Chanfaac9c42006-12-14 15:56:32 -0800221
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
224 */
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
Michael Chane89bbf12005-08-25 15:36:58 -0700231 return (bp->tx_ring_size - diff);
232}
233
/* Indirect register read through the PCICFG register window.
 * indirect_lock serializes the two-step address/data access against
 * concurrent indirect reads/writes.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
245
/* Indirect register write through the PCICFG register window; see
 * bnx2_reg_rd_ind() for the locking rationale.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
254
255static void
256bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257{
258 offset += cid_addr;
Michael Chan1b8227c2007-05-03 13:24:05 -0700259 spin_lock_bh(&bp->indirect_lock);
Michael Chan59b47d82006-11-19 14:10:45 -0800260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
Michael Chan1b8227c2007-05-03 13:24:05 -0700277 spin_unlock_bh(&bp->indirect_lock);
Michael Chanb6016b72005-05-26 13:03:09 -0700278}
279
/* Read PHY register 'reg' via a manual MDIO transaction.
 *
 * If the chip is auto-polling the PHY, auto-poll is turned off around
 * the transaction and restored afterwards (each mode write is followed
 * by a read-back — presumably to flush the posted write — plus 40us).
 * Returns 0 with *val set on success, or -EBUSY with *val = 0 if the
 * transaction does not complete within the 50 x 10us polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the read: PHY address, register, READ command, BUSY. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			/* BUSY cleared; settle, then re-read the data bits. */
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
336
/* Write 'val' to PHY register 'reg' via a manual MDIO transaction.
 * Mirrors bnx2_read_phy(): auto-poll is suspended around the access
 * and restored afterwards.  Returns 0 on success or -EBUSY if the
 * transaction is still busy after the 50 x 10us polling window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the write: PHY address, register, data, WRITE command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
385
/* Mask the chip's interrupt; the trailing read-back presumably flushes
 * the posted write so the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
393
/* Re-enable interrupts.  First ack up to last_status_idx with the mask
 * still set, then repeat without the mask to actually unmask; finally
 * request an immediate host-coalescing pass (COAL_NOW) so any event
 * that arrived while masked is serviced right away.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
406
/* Disable interrupts and wait for any in-flight ISR to finish.
 * intr_sem is incremented first; bnx2_netif_start() only re-enables
 * when a matching decrement brings it back to zero.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
414
/* Quiesce the interface: mask interrupts (synchronously), then stop
 * the poll routine and the tx queue if the device is up.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
425
426static void
427bnx2_netif_start(struct bnx2 *bp)
428{
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436}
437
/* Release all DMA memory: 5709 context pages, the combined
 * status/statistics block, the tx ring, and all rx rings.  Safe on a
 * partially-allocated bp (used as the error path of bnx2_alloc_mem):
 * each pointer is checked and cleared after freeing.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* On-host context memory (allocated for 5709 only). */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		/* status_blk and stats_blk share a single allocation. */
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);		/* kfree(NULL) is a no-op */
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);		/* rx_buf_ring was vmalloc'ed */
	bp->rx_buf_ring = NULL;
}
476
/* Allocate all host memory the chip needs: tx/rx software bookkeeping
 * arrays, tx/rx descriptor rings, the combined status+statistics DMA
 * block, and (5709 only) on-host context pages.
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx bookkeeping can span multiple rings; vmalloc since it may
	 * be too large for kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives right after the cache-aligned status block. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB of context memory, split into BCM_PAGE_SIZE pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
547
/* Translate the driver's current link state (speed/duplex/autoneg)
 * into a BNX2_LINK_STATUS_* word and post it to the bootcode through
 * the shared-memory LINK_STATUS field.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice — presumably because the
			 * register latches; the second read reflects the
			 * current state.  TODO(review): confirm.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
603
604static void
Michael Chanb6016b72005-05-26 13:03:09 -0700605bnx2_report_link(struct bnx2 *bp)
606{
607 if (bp->link_up) {
608 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
610
611 printk("%d Mbps ", bp->line_speed);
612
613 if (bp->duplex == DUPLEX_FULL)
614 printk("full duplex");
615 else
616 printk("half duplex");
617
618 if (bp->flow_ctrl) {
619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
620 printk(", receive ");
621 if (bp->flow_ctrl & FLOW_CTRL_TX)
622 printk("& transmit ");
623 }
624 else {
625 printk(", transmit ");
626 }
627 printk("flow control ON");
628 }
629 printk("\n");
630 }
631 else {
632 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
634 }
Michael Chane3648b32005-11-04 08:51:21 -0800635
636 bnx2_report_fw_link(bp);
Michael Chanb6016b72005-05-26 13:03:09 -0700637}
638
/* Resolve the effective pause configuration into bp->flow_ctrl.
 *
 * If speed or flow control is forced (not both autonegotiated), the
 * requested setting applies (full duplex only).  Half duplex never
 * gets pause.  The 5708 SerDes reports the resolved result directly
 * in 1000X_STAT1.  Otherwise local and partner advertisements are
 * compared per IEEE 802.3 Table 28B-3; SerDes 1000X pause bits are
 * first remapped onto the copper ADVERTISE_PAUSE_* encoding so one
 * resolution table serves both media.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: hardware already resolved tx/rx pause. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map 1000X pause bits to the copper encoding. */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
714
/* Record link-up for the 5709 SerDes PHY and derive speed/duplex from
 * the GP_STATUS block's top-level autoneg status register (the block
 * address is switched there and back around the read).  With autoneg
 * disabled the requested speed/duplex are used verbatim.  Returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
753
/* Record link-up for the 5708 SerDes PHY and decode speed/duplex from
 * its 1000X_STAT1 status register.  Returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
782
783static int
784bnx2_5706s_linkup(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -0700785{
786 u32 bmcr, local_adv, remote_adv, common;
787
788 bp->link_up = 1;
789 bp->line_speed = SPEED_1000;
790
Michael Chanca58c3a2007-05-03 13:22:52 -0700791 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
Michael Chanb6016b72005-05-26 13:03:09 -0700792 if (bmcr & BMCR_FULLDPLX) {
793 bp->duplex = DUPLEX_FULL;
794 }
795 else {
796 bp->duplex = DUPLEX_HALF;
797 }
798
799 if (!(bmcr & BMCR_ANENABLE)) {
800 return 0;
801 }
802
Michael Chanca58c3a2007-05-03 13:22:52 -0700803 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
Michael Chanb6016b72005-05-26 13:03:09 -0700805
806 common = local_adv & remote_adv;
807 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
808
809 if (common & ADVERTISE_1000XFULL) {
810 bp->duplex = DUPLEX_FULL;
811 }
812 else {
813 bp->duplex = DUPLEX_HALF;
814 }
815 }
816
817 return 0;
818}
819
/* Determine speed/duplex for a copper PHY link.
 *
 * With autoneg enabled: check the 1000BASE-T control/status pair first
 * (partner abilities sit 2 bits left of local in STAT1000, hence the
 * >> 2), then fall back to the 10/100 advertisement registers; if no
 * common ability exists, the link is marked down.  With autoneg off,
 * speed/duplex are decoded from the BMCR force bits.  Returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability — report no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode BMCR directly. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
885
/* Program the EMAC for the resolved link parameters: port mode for
 * the current speed, duplex, and tx/rx pause enables.  Returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* 0x26ff for 1000HD, 0x2620 otherwise — TX_LENGTHS tuning
	 * (presumably slot-time/IPG related; from Broadcom).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
952
Michael Chan27a005b2007-05-03 13:23:41 -0700953static void
954bnx2_enable_bmsr1(struct bnx2 *bp)
955{
956 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957 (CHIP_NUM(bp) == CHIP_NUM_5709))
958 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959 MII_BNX2_BLK_ADDR_GP_STATUS);
960}
961
962static void
963bnx2_disable_bmsr1(struct bnx2 *bp)
964{
965 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966 (CHIP_NUM(bp) == CHIP_NUM_5709))
967 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
969}
970
Michael Chanb6016b72005-05-26 13:03:09 -0700971static int
Michael Chan605a9e22007-05-03 13:23:13 -0700972bnx2_test_and_enable_2g5(struct bnx2 *bp)
973{
974 u32 up1;
975 int ret = 1;
976
977 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
978 return 0;
979
980 if (bp->autoneg & AUTONEG_SPEED)
981 bp->advertising |= ADVERTISED_2500baseX_Full;
982
Michael Chan27a005b2007-05-03 13:23:41 -0700983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
985
Michael Chan605a9e22007-05-03 13:23:13 -0700986 bnx2_read_phy(bp, bp->mii_up1, &up1);
987 if (!(up1 & BCM5708S_UP1_2G5)) {
988 up1 |= BCM5708S_UP1_2G5;
989 bnx2_write_phy(bp, bp->mii_up1, up1);
990 ret = 0;
991 }
992
Michael Chan27a005b2007-05-03 13:23:41 -0700993 if (CHIP_NUM(bp) == CHIP_NUM_5709)
994 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996
Michael Chan605a9e22007-05-03 13:23:13 -0700997 return ret;
998}
999
1000static int
1001bnx2_test_and_disable_2g5(struct bnx2 *bp)
1002{
1003 u32 up1;
1004 int ret = 0;
1005
1006 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1007 return 0;
1008
Michael Chan27a005b2007-05-03 13:23:41 -07001009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
Michael Chan605a9e22007-05-03 13:23:13 -07001012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (up1 & BCM5708S_UP1_2G5) {
1014 up1 &= ~BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1016 ret = 1;
1017 }
1018
Michael Chan27a005b2007-05-03 13:23:41 -07001019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
Michael Chan605a9e22007-05-03 13:23:13 -07001023 return ret;
1024}
1025
1026static void
1027bnx2_enable_forced_2g5(struct bnx2 *bp)
1028{
1029 u32 bmcr;
1030
1031 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1032 return;
1033
Michael Chan27a005b2007-05-03 13:23:41 -07001034 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1035 u32 val;
1036
1037 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038 MII_BNX2_BLK_ADDR_SERDES_DIG);
1039 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1043
1044 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1047
1048 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001049 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050 bmcr |= BCM5708S_BMCR_FORCE_2500;
1051 }
1052
1053 if (bp->autoneg & AUTONEG_SPEED) {
1054 bmcr &= ~BMCR_ANENABLE;
1055 if (bp->req_duplex == DUPLEX_FULL)
1056 bmcr |= BMCR_FULLDPLX;
1057 }
1058 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1059}
1060
1061static void
1062bnx2_disable_forced_2g5(struct bnx2 *bp)
1063{
1064 u32 bmcr;
1065
1066 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1067 return;
1068
Michael Chan27a005b2007-05-03 13:23:41 -07001069 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1070 u32 val;
1071
1072 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073 MII_BNX2_BLK_ADDR_SERDES_DIG);
1074 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1077
1078 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1081
1082 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
Michael Chan605a9e22007-05-03 13:23:13 -07001083 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1085 }
1086
1087 if (bp->autoneg & AUTONEG_SPEED)
1088 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1090}
1091
/* Re-evaluate the link state from the PHY, run the per-PHY link-up
 * handler, resolve flow control, report transitions, and reprogram the
 * MAC.  Always returns 0.  Caller is expected to hold bp->phy_lock
 * (this path reads/writes PHY registers) -- TODO confirm at call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is forced up and the PHY ignored. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* Read the status register twice; the second read returns the
	 * current state (BMSR latches link-down events). */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, trust the EMAC link status over the PHY's. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Dispatch to the chip-specific link-up handler to read
		 * back negotiated speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: drop any forced 2.5G setting so autoneg
		 * can succeed with a 1G-only partner. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Log only actual transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1154
1155static int
1156bnx2_reset_phy(struct bnx2 *bp)
1157{
1158 int i;
1159 u32 reg;
1160
Michael Chanca58c3a2007-05-03 13:22:52 -07001161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
Michael Chanb6016b72005-05-26 13:03:09 -07001162
1163#define PHY_RESET_MAX_WAIT 100
1164 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1165 udelay(10);
1166
Michael Chanca58c3a2007-05-03 13:22:52 -07001167 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
Michael Chanb6016b72005-05-26 13:03:09 -07001168 if (!(reg & BMCR_RESET)) {
1169 udelay(20);
1170 break;
1171 }
1172 }
1173 if (i == PHY_RESET_MAX_WAIT) {
1174 return -EBUSY;
1175 }
1176 return 0;
1177}
1178
1179static u32
1180bnx2_phy_get_pause_adv(struct bnx2 *bp)
1181{
1182 u32 adv = 0;
1183
1184 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1186
1187 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188 adv = ADVERTISE_1000XPAUSE;
1189 }
1190 else {
1191 adv = ADVERTISE_PAUSE_CAP;
1192 }
1193 }
1194 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196 adv = ADVERTISE_1000XPSE_ASYM;
1197 }
1198 else {
1199 adv = ADVERTISE_PAUSE_ASYM;
1200 }
1201 }
1202 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1205 }
1206 else {
1207 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208 }
1209 }
1210 return adv;
1211}
1212
/* Configure the SerDes PHY according to the requested settings: either
 * force a fixed speed/duplex (autoneg off) or program the advertisement
 * registers and restart autonegotiation.  Always returns 0.  May
 * temporarily drop bp->phy_lock while sleeping (see the msleep below),
 * so callers must tolerate that.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability requires a link bounce so
		 * the partner re-negotiates. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear BMCR bit 13 -- presumably part of
				 * the 5709 forced-speed encoding; magic
				 * value kept as-is. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing to change in the PHY; refresh the MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop the lock while sleeping; msleep cannot be
			 * called with a BH spinlock held. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		/* Advertisement unchanged and autoneg already on. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1324
/* Ethtool advertisement mask covering every speed supported on fibre. */
#define ETHTOOL_ALL_FIBRE_SPEED \
	(ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask covering every speed supported on copper. */
#define ETHTOOL_ALL_COPPER_SPEED \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 modes (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for all gigabit modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1337
/* Configure the copper PHY: either program the advertisement registers
 * and (re)start autonegotiation, or force the requested speed/duplex.
 * Always returns 0.  May temporarily drop bp->phy_lock while sleeping
 * (see the msleep below), so callers must tolerate that.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg path: rebuild the 10/100 and 1000 advertisement
		 * registers from bp->advertising. */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only touch the PHY if something actually changed or
		 * autoneg is currently off. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced-speed path: build the BMCR value for the request. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read twice: BMSR latches link-down events. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop the lock while sleeping; msleep cannot be
			 * called with a BH spinlock held. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1434
1435static int
1436bnx2_setup_phy(struct bnx2 *bp)
1437{
1438 if (bp->loopback == MAC_LOOPBACK)
1439 return 0;
1440
1441 if (bp->phy_flags & PHY_SERDES_FLAG) {
1442 return (bnx2_setup_serdes_phy(bp));
1443 }
1444 else {
1445 return (bnx2_setup_copper_phy(bp));
1446 }
1447}
1448
/* Initialize the 5709 SerDes PHY.  The 5709 uses banked registers
 * selected through MII_BNX2_BLK_ADDR; the standard MII registers live
 * at an offset of 0x10.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers are offset by 0x10 on this PHY; the
	 * link-status and 2.5G registers live in vendor blocks. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Map the autoneg MMD through the address-expansion block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Fixed fibre mode, no auto media detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable 2.5G per the PHY capability flag. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable next-page and BAM negotiation extensions. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the default IEEE block selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1497
/* Initialize the 5708 SerDes PHY: fibre mode, optional 2.5G support,
 * and board-specific TX amplitude tuning.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* 2.5G advertisement lives in the 5708-specific UP1 register. */
	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fibre mode with auto media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-provided TX control value for backplane setups,
	 * taken from shared firmware configuration memory. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1555
/* Initialize the 5706 SerDes PHY.  The raw register numbers (0x18,
 * 0x1c) and values are vendor-specific expansion registers -- their
 * exact meaning is not documented here; sequences kept as-is.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	/* Jumbo frames need the extended packet length setting. */
	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length setting. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1592
/* Initialize the copper PHY: apply workarounds selected by phy_flags,
 * configure extended packet length for jumbo MTUs, and enable
 * ethernet@wirespeed.  The raw register numbers (0x10, 0x15, 0x17,
 * 0x18) are vendor shadow/expansion registers; values kept as-is.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC-fix workaround sequence via shadow registers. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 in DSP expand register 8 to disable early DAC. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1643
1644
/* Top-level PHY initialization: set default MII register offsets, read
 * the PHY ID, dispatch to the chip-specific init routine, and apply the
 * current settings via bnx2_setup_phy().  Returns the chip-specific
 * init routine's status (0 on success).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default MII register map; bnx2_init_5709s_phy() overrides
	 * these for the 5709's banked register layout. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Assemble the 32-bit PHY id from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1683
1684static int
1685bnx2_set_mac_loopback(struct bnx2 *bp)
1686{
1687 u32 mac_mode;
1688
1689 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1693 bp->link_up = 1;
1694 return 0;
1695}
1696
Michael Chanbc5a0692006-01-23 16:13:22 -08001697static int bnx2_test_link(struct bnx2 *);
1698
/* Put the PHY into loopback at 1000 Mbps full duplex, wait for the
 * (internal) link to come up, and configure the EMAC for GMII.
 * Returns 0 on success or the PHY write's error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY register access is serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll up to ~1 s for the loopback link; proceed regardless. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear loopback/force/speed bits in the EMAC and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1728
/* Send a message to the bootcode firmware through the driver mailbox in
 * shared memory and wait for it to be acknowledged.  A sequence number
 * (bp->fw_wr_seq) is folded into msg_data so the ack can be matched.
 * Returns 0 on success (or when no wait was requested), -EBUSY if the
 * firmware never acked, -EIO if it acked with a failure status.  When
 * 'silent' is set, the timeout is not logged.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* Messages that do not request a wait are fire-and-forget. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	/* The firmware acked but reported a failure status. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1771
/* Program the 5709's host page table with the DMA addresses of the
 * context memory blocks (bp->ctx_blk_mapping[0..ctx_pages-1]).  Each
 * entry write is polled for completion.  Returns 0 on success, -EBUSY
 * if a page-table write does not complete.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine; encode the page size relative to
	 * the minimum 256-byte (2^8) page. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low and high halves of the page DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware clears the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1805
/* Zero out the on-chip context memory for all 96 connection IDs on
 * pre-5709 chips.  The 5706 A0 needs a remapped physical CID for part
 * of the range (hardware erratum -- mapping kept as-is).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* Remap CIDs with bit 3 set into the 0x60 range. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each CID context spans several physical context pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
1851
/* Work around bad on-chip RX buffer memory: drain the firmware's mbuf
 * allocator, remember the good buffers, and free only those back --
 * permanently retiring buffers flagged bad (bit 9 of the allocated
 * value).  Returns 0 on success, -ENOMEM if the scratch array cannot
 * be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Scratch space for up to 512 good mbuf handles. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1902
1903static void
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001904bnx2_set_mac_addr(struct bnx2 *bp)
Michael Chanb6016b72005-05-26 13:03:09 -07001905{
1906 u32 val;
1907 u8 *mac_addr = bp->dev->dev_addr;
1908
1909 val = (mac_addr[0] << 8) | mac_addr[1];
1910
1911 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1912
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001913 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
Michael Chanb6016b72005-05-26 13:03:09 -07001914 (mac_addr[4] << 8) | mac_addr[5];
1915
1916 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1917}
1918
/* Allocate and DMA-map a fresh receive skb for ring slot @index, and
 * publish its bus address in the corresponding rx_bd descriptor.
 * Also advances rx_prod_bseq by the buffer size (byte-sequence counter
 * the hardware uses for flow accounting).
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary if the allocator
	 * did not already do so. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Descriptor carries the 64-bit bus address split in two words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1949
Michael Chanda3e4fb2007-05-03 13:24:23 -07001950static int
1951bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1952{
1953 struct status_block *sblk = bp->status_blk;
1954 u32 new_link_state, old_link_state;
1955 int is_set = 1;
1956
1957 new_link_state = sblk->status_attn_bits & event;
1958 old_link_state = sblk->status_attn_bits_ack & event;
1959 if (new_link_state != old_link_state) {
1960 if (new_link_state)
1961 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1962 else
1963 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1964 } else
1965 is_set = 0;
1966
1967 return is_set;
1968}
1969
Michael Chanb6016b72005-05-26 13:03:09 -07001970static void
1971bnx2_phy_int(struct bnx2 *bp)
1972{
Michael Chanda3e4fb2007-05-03 13:24:23 -07001973 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
1974 spin_lock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001975 bnx2_set_link(bp);
Michael Chanda3e4fb2007-05-03 13:24:23 -07001976 spin_unlock(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07001977 }
1978}
1979
/* Reclaim completed TX descriptors up to the hardware consumer index
 * from the status block: unmap the DMA buffers, free the skbs, and wake
 * the queue if it was stopped and enough descriptors became free.
 * Runs from NAPI poll context.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last entry of a descriptor page is a link entry, not a
	 * real BD; skip past it. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim a TSO packet once ALL of its BDs
			 * (head + one per fragment) have completed. */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment's page as we walk its BDs. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the consumer index: more completions may have
		 * arrived while we were reclaiming. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race with the xmit
	 * path stopping the queue concurrently. */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2067
/* Recycle the rx buffer at @cons into producer slot @prod without
 * allocating a new skb (used when the packet was copied out or had
 * errors).  Transfers the skb pointer, DMA mapping and descriptor
 * address from the consumer slot to the producer slot.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (partially synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2097
/* NAPI receive processing: consume up to @budget completed rx
 * descriptors, hand the packets to the stack (with VLAN and checksum
 * offload handling), refill the ring, and tell the hardware the new
 * producer index/byte sequence.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the ring-page link entry. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the headers; the full unmap happens below if
		 * we keep the buffer. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr completion header to
		 * the packet data; packet length includes the 4-byte CRC. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes straight back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer allocated: hand this one up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement available (or bad packet): recycle
			 * the buffer and drop the frame. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-length frames unless they are VLAN-tagged
		 * (0x8100), which legitimately adds 4 bytes. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2245
2246/* MSI ISR - The only difference between this and the INTx ISR
2247 * is that the MSI interrupt is always serviced.
2248 */
2249static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01002250bnx2_msi(int irq, void *dev_instance)
Michael Chanb6016b72005-05-26 13:03:09 -07002251{
2252 struct net_device *dev = dev_instance;
Michael Chan972ec0d2006-01-23 16:12:43 -08002253 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002254
Michael Chanc921e4c2005-09-08 13:15:32 -07002255 prefetch(bp->status_blk);
Michael Chanb6016b72005-05-26 13:03:09 -07002256 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2257 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2258 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2259
2260 /* Return here if interrupt is disabled. */
Michael Chan73eef4c2005-08-25 15:39:15 -07002261 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2262 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002263
Michael Chan73eef4c2005-08-25 15:39:15 -07002264 netif_rx_schedule(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07002265
Michael Chan73eef4c2005-08-25 15:39:15 -07002266 return IRQ_HANDLED;
Michael Chanb6016b72005-05-26 13:03:09 -07002267}
2268
2269static irqreturn_t
Michael Chan8e6a72c2007-05-03 13:24:48 -07002270bnx2_msi_1shot(int irq, void *dev_instance)
2271{
2272 struct net_device *dev = dev_instance;
2273 struct bnx2 *bp = netdev_priv(dev);
2274
2275 prefetch(bp->status_blk);
2276
2277 /* Return here if interrupt is disabled. */
2278 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2279 return IRQ_HANDLED;
2280
2281 netif_rx_schedule(dev);
2282
2283 return IRQ_HANDLED;
2284}
2285
/* INTx (possibly shared) ISR: decide whether the interrupt is ours,
 * mask further interrupts, and schedule NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask the interrupt while NAPI polling is pending. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2315
Michael Chanda3e4fb2007-05-03 13:24:23 -07002316#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
2317
Michael Chanf4e418f2005-11-04 08:53:48 -08002318static inline int
2319bnx2_has_work(struct bnx2 *bp)
2320{
2321 struct status_block *sblk = bp->status_blk;
2322
2323 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2324 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2325 return 1;
2326
Michael Chanda3e4fb2007-05-03 13:24:23 -07002327 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2328 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
Michael Chanf4e418f2005-11-04 08:53:48 -08002329 return 1;
2330
2331 return 0;
2332}
2333
/* NAPI poll handler: service link attention, TX completions, and up to
 * the budgeted number of RX packets.  Returns 0 (and re-enables
 * interrupts) when all work is done, 1 to stay on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Per-call quota may be smaller than the global budget. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we acted on before re-checking for
	 * work, so a racing update re-triggers us. */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: two writes — first keeps interrupts masked while
		 * updating the index, second unmasks.  NOTE(review): the
		 * two-step sequence looks like a deliberate hardware
		 * workaround; do not collapse into one write. */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2394
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the receive filtering mode (promiscuous, all-multicast, or a
 * multicast hash filter) and the VLAN tag stripping behavior into the
 * EMAC/RPM registers, based on dev->flags and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware unless a vlan group is registered
	 * or ASF management firmware needs to see tagged frames. */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept everything by filling the multicast hash. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 256 filter bits
			 * spread over the 8 hash registers. */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2469
Michael Chanfba9fe92006-06-12 22:21:25 -07002470#define FW_BUF_SIZE 0x8000
2471
2472static int
2473bnx2_gunzip_init(struct bnx2 *bp)
2474{
2475 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2476 goto gunzip_nomem1;
2477
2478 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2479 goto gunzip_nomem2;
2480
2481 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2482 if (bp->strm->workspace == NULL)
2483 goto gunzip_nomem3;
2484
2485 return 0;
2486
2487gunzip_nomem3:
2488 kfree(bp->strm);
2489 bp->strm = NULL;
2490
2491gunzip_nomem2:
2492 vfree(bp->gunzip_buf);
2493 bp->gunzip_buf = NULL;
2494
2495gunzip_nomem1:
2496 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2497 "uncompression.\n", bp->dev->name);
2498 return -ENOMEM;
2499}
2500
2501static void
2502bnx2_gunzip_end(struct bnx2 *bp)
2503{
2504 kfree(bp->strm->workspace);
2505
2506 kfree(bp->strm);
2507 bp->strm = NULL;
2508
2509 if (bp->gunzip_buf) {
2510 vfree(bp->gunzip_buf);
2511 bp->gunzip_buf = NULL;
2512 }
2513}
2514
/* Decompress a gzip-wrapped firmware image @zbuf of @len bytes into
 * bp->gunzip_buf.  On success (or partial success) *outbuf points at
 * bp->gunzip_buf and *outlen is the decompressed size.
 *
 * Returns 0 on a complete stream, -EINVAL on a bad gzip header, or the
 * zlib error code otherwise.  The caller must not free *outbuf; it is
 * owned by bp (see bnx2_gunzip_init/bnx2_gunzip_end).
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* Skip the NUL-terminated original-filename field if present. */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* Inflate the raw deflate payload (negative windowBits tells
	 * zlib there is no zlib header). */
	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2555
Michael Chanb6016b72005-05-26 13:03:09 -07002556static void
2557load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2558 u32 rv2p_proc)
2559{
2560 int i;
2561 u32 val;
2562
2563
2564 for (i = 0; i < rv2p_code_len; i += 8) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002565 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002566 rv2p_code++;
Michael Chanfba9fe92006-06-12 22:21:25 -07002567 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
Michael Chanb6016b72005-05-26 13:03:09 -07002568 rv2p_code++;
2569
2570 if (rv2p_proc == RV2P_PROC1) {
2571 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2572 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2573 }
2574 else {
2575 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2576 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2577 }
2578 }
2579
2580 /* Reset the processor, un-stall is done later. */
2581 if (rv2p_proc == RV2P_PROC1) {
2582 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2583 }
2584 else {
2585 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2586 }
2587}
2588
Michael Chanaf3ee512006-11-19 14:09:25 -08002589static int
Michael Chanb6016b72005-05-26 13:03:09 -07002590load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2591{
2592 u32 offset;
2593 u32 val;
Michael Chanaf3ee512006-11-19 14:09:25 -08002594 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002595
2596 /* Halt the CPU. */
2597 val = REG_RD_IND(bp, cpu_reg->mode);
2598 val |= cpu_reg->mode_value_halt;
2599 REG_WR_IND(bp, cpu_reg->mode, val);
2600 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2601
2602 /* Load the Text area. */
2603 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
Michael Chanaf3ee512006-11-19 14:09:25 -08002604 if (fw->gz_text) {
2605 u32 text_len;
2606 void *text;
2607
2608 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2609 &text_len);
2610 if (rc)
2611 return rc;
2612
2613 fw->text = text;
2614 }
2615 if (fw->gz_text) {
Michael Chanb6016b72005-05-26 13:03:09 -07002616 int j;
2617
2618 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
Michael Chanfba9fe92006-06-12 22:21:25 -07002619 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
Michael Chanb6016b72005-05-26 13:03:09 -07002620 }
2621 }
2622
2623 /* Load the Data area. */
2624 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2625 if (fw->data) {
2626 int j;
2627
2628 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2629 REG_WR_IND(bp, offset, fw->data[j]);
2630 }
2631 }
2632
2633 /* Load the SBSS area. */
2634 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2635 if (fw->sbss) {
2636 int j;
2637
2638 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2639 REG_WR_IND(bp, offset, fw->sbss[j]);
2640 }
2641 }
2642
2643 /* Load the BSS area. */
2644 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2645 if (fw->bss) {
2646 int j;
2647
2648 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2649 REG_WR_IND(bp, offset, fw->bss[j]);
2650 }
2651 }
2652
2653 /* Load the Read-Only area. */
2654 offset = cpu_reg->spad_base +
2655 (fw->rodata_addr - cpu_reg->mips_view_base);
2656 if (fw->rodata) {
2657 int j;
2658
2659 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2660 REG_WR_IND(bp, offset, fw->rodata[j]);
2661 }
2662 }
2663
2664 /* Clear the pre-fetch instruction. */
2665 REG_WR_IND(bp, cpu_reg->inst, 0);
2666 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2667
2668 /* Start the CPU. */
2669 val = REG_RD_IND(bp, cpu_reg->mode);
2670 val &= ~cpu_reg->mode_value_halt;
2671 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2672 REG_WR_IND(bp, cpu_reg->mode, val);
Michael Chanaf3ee512006-11-19 14:09:25 -08002673
2674 return 0;
Michael Chanb6016b72005-05-26 13:03:09 -07002675}
2676
Michael Chanfba9fe92006-06-12 22:21:25 -07002677static int
Michael Chanb6016b72005-05-26 13:03:09 -07002678bnx2_init_cpus(struct bnx2 *bp)
2679{
2680 struct cpu_reg cpu_reg;
Michael Chanaf3ee512006-11-19 14:09:25 -08002681 struct fw_info *fw;
Michael Chanfba9fe92006-06-12 22:21:25 -07002682 int rc = 0;
2683 void *text;
2684 u32 text_len;
2685
2686 if ((rc = bnx2_gunzip_init(bp)) != 0)
2687 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002688
2689 /* Initialize the RV2P processor. */
Michael Chanfba9fe92006-06-12 22:21:25 -07002690 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2691 &text_len);
2692 if (rc)
2693 goto init_cpu_err;
2694
2695 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2696
2697 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2698 &text_len);
2699 if (rc)
2700 goto init_cpu_err;
2701
2702 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
Michael Chanb6016b72005-05-26 13:03:09 -07002703
2704 /* Initialize the RX Processor. */
2705 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2706 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2707 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2708 cpu_reg.state = BNX2_RXP_CPU_STATE;
2709 cpu_reg.state_value_clear = 0xffffff;
2710 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2711 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2712 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2713 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2714 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2715 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2716 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002717
Michael Chand43584c2006-11-19 14:14:35 -08002718 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2719 fw = &bnx2_rxp_fw_09;
2720 else
2721 fw = &bnx2_rxp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002722
Michael Chanaf3ee512006-11-19 14:09:25 -08002723 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002724 if (rc)
2725 goto init_cpu_err;
2726
Michael Chanb6016b72005-05-26 13:03:09 -07002727 /* Initialize the TX Processor. */
2728 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2729 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2730 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2731 cpu_reg.state = BNX2_TXP_CPU_STATE;
2732 cpu_reg.state_value_clear = 0xffffff;
2733 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2734 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2735 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2736 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2737 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2738 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2739 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002740
Michael Chand43584c2006-11-19 14:14:35 -08002741 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2742 fw = &bnx2_txp_fw_09;
2743 else
2744 fw = &bnx2_txp_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002745
Michael Chanaf3ee512006-11-19 14:09:25 -08002746 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002747 if (rc)
2748 goto init_cpu_err;
2749
Michael Chanb6016b72005-05-26 13:03:09 -07002750 /* Initialize the TX Patch-up Processor. */
2751 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2752 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2753 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2754 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2755 cpu_reg.state_value_clear = 0xffffff;
2756 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2757 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2758 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2759 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2760 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2761 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2762 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002763
Michael Chand43584c2006-11-19 14:14:35 -08002764 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2765 fw = &bnx2_tpat_fw_09;
2766 else
2767 fw = &bnx2_tpat_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002768
Michael Chanaf3ee512006-11-19 14:09:25 -08002769 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002770 if (rc)
2771 goto init_cpu_err;
2772
Michael Chanb6016b72005-05-26 13:03:09 -07002773 /* Initialize the Completion Processor. */
2774 cpu_reg.mode = BNX2_COM_CPU_MODE;
2775 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2776 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2777 cpu_reg.state = BNX2_COM_CPU_STATE;
2778 cpu_reg.state_value_clear = 0xffffff;
2779 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2780 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2781 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2782 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2783 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2784 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2785 cpu_reg.mips_view_base = 0x8000000;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002786
Michael Chand43584c2006-11-19 14:14:35 -08002787 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2788 fw = &bnx2_com_fw_09;
2789 else
2790 fw = &bnx2_com_fw_06;
Michael Chanb6016b72005-05-26 13:03:09 -07002791
Michael Chanaf3ee512006-11-19 14:09:25 -08002792 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chanfba9fe92006-06-12 22:21:25 -07002793 if (rc)
2794 goto init_cpu_err;
2795
Michael Chand43584c2006-11-19 14:14:35 -08002796 /* Initialize the Command Processor. */
2797 cpu_reg.mode = BNX2_CP_CPU_MODE;
2798 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2799 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2800 cpu_reg.state = BNX2_CP_CPU_STATE;
2801 cpu_reg.state_value_clear = 0xffffff;
2802 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2803 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2804 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2805 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2806 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2807 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2808 cpu_reg.mips_view_base = 0x8000000;
Michael Chanb6016b72005-05-26 13:03:09 -07002809
Michael Chand43584c2006-11-19 14:14:35 -08002810 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2811 fw = &bnx2_cp_fw_09;
Michael Chanb6016b72005-05-26 13:03:09 -07002812
Adrian Bunk6c1bbcc2006-12-07 15:10:06 -08002813 rc = load_cpu_fw(bp, &cpu_reg, fw);
Michael Chand43584c2006-11-19 14:14:35 -08002814 if (rc)
2815 goto init_cpu_err;
2816 }
Michael Chanfba9fe92006-06-12 22:21:25 -07002817init_cpu_err:
2818 bnx2_gunzip_end(bp);
2819 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07002820}
2821
/* Transition the chip between PCI power states.
 *
 * PCI_D0:    clear the PM state bits (and ack any pending PME), wait out
 *            the required delay when coming up from D3hot, then drop the
 *            WoL-related EMAC/RPM receive bits.
 * PCI_D3hot: when WoL is armed, force a 10/100 autoneg PHY setup, program
 *            the MAC for magic-packet/ACPI pattern reception and all-
 *            multicast, notify the firmware of the suspend type, then
 *            write the D3hot state (plus PME enable if waking is armed).
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and write-1-clear the PME
		 * status bit in one config write. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear the WoL packet-received latches and magic-packet
		 * detection that D3hot may have enabled. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the link down to
			 * 10/100 for the low-power state, then restore
			 * the user's autoneg/advertising settings. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort-mode rule 0 to accept broadcast and
			 * multicast; write 0 first, then the value, then
			 * the value with the enable bit. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell firmware whether WoL is in effect for this suspend. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter D3hot (state value 3)
			 * when WoL is armed. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* 3 == D3hot in the PM state field */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2948
2949static int
2950bnx2_acquire_nvram_lock(struct bnx2 *bp)
2951{
2952 u32 val;
2953 int j;
2954
2955 /* Request access to the flash interface. */
2956 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2957 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2958 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2959 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2960 break;
2961
2962 udelay(5);
2963 }
2964
2965 if (j >= NVRAM_TIMEOUT_COUNT)
2966 return -EBUSY;
2967
2968 return 0;
2969}
2970
2971static int
2972bnx2_release_nvram_lock(struct bnx2 *bp)
2973{
2974 int j;
2975 u32 val;
2976
2977 /* Relinquish nvram interface. */
2978 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2979
2980 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2981 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2982 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2983 break;
2984
2985 udelay(5);
2986 }
2987
2988 if (j >= NVRAM_TIMEOUT_COUNT)
2989 return -EBUSY;
2990
2991 return 0;
2992}
2993
2994
2995static int
2996bnx2_enable_nvram_write(struct bnx2 *bp)
2997{
2998 u32 val;
2999
3000 val = REG_RD(bp, BNX2_MISC_CFG);
3001 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3002
3003 if (!bp->flash_info->buffered) {
3004 int j;
3005
3006 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3007 REG_WR(bp, BNX2_NVM_COMMAND,
3008 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3009
3010 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3011 udelay(5);
3012
3013 val = REG_RD(bp, BNX2_NVM_COMMAND);
3014 if (val & BNX2_NVM_COMMAND_DONE)
3015 break;
3016 }
3017
3018 if (j >= NVRAM_TIMEOUT_COUNT)
3019 return -EBUSY;
3020 }
3021 return 0;
3022}
3023
3024static void
3025bnx2_disable_nvram_write(struct bnx2 *bp)
3026{
3027 u32 val;
3028
3029 val = REG_RD(bp, BNX2_MISC_CFG);
3030 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3031}
3032
3033
3034static void
3035bnx2_enable_nvram_access(struct bnx2 *bp)
3036{
3037 u32 val;
3038
3039 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3040 /* Enable both bits, even on read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003041 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003042 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3043}
3044
3045static void
3046bnx2_disable_nvram_access(struct bnx2 *bp)
3047{
3048 u32 val;
3049
3050 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3051 /* Disable both bits, even after read. */
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003052 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
Michael Chanb6016b72005-05-26 13:03:09 -07003053 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3054 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3055}
3056
3057static int
3058bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3059{
3060 u32 cmd;
3061 int j;
3062
3063 if (bp->flash_info->buffered)
3064 /* Buffered flash, no erase needed */
3065 return 0;
3066
3067 /* Build an erase command */
3068 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3069 BNX2_NVM_COMMAND_DOIT;
3070
3071 /* Need to clear DONE bit separately. */
3072 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3073
3074 /* Address of the NVRAM to read from. */
3075 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3076
3077 /* Issue an erase command. */
3078 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3079
3080 /* Wait for completion. */
3081 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3082 u32 val;
3083
3084 udelay(5);
3085
3086 val = REG_RD(bp, BNX2_NVM_COMMAND);
3087 if (val & BNX2_NVM_COMMAND_DONE)
3088 break;
3089 }
3090
3091 if (j >= NVRAM_TIMEOUT_COUNT)
3092 return -EBUSY;
3093
3094 return 0;
3095}
3096
/* Read one 32-bit word of NVRAM at byte @offset into @ret_val; the four
 * bytes land in on-flash order (mirrored by cpu_to_be32() on the write
 * side).  @cmd_flags supplies FIRST/LAST framing bits for multi-word
 * bursts.  Callers in this file take the NVRAM lock and enable access
 * before calling.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts are
	 * addressed as (page number << page_bits) + offset-within-page
	 * rather than by flat byte offset. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Swap the register value back to flash byte
			 * order before copying it out byte-wise. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3142
3143
/* Write one 32-bit word to NVRAM at byte @offset.  @val points to the 4
 * data bytes in on-flash order; @cmd_flags supplies FIRST/LAST framing
 * bits for multi-word bursts.  Callers in this file take the NVRAM lock
 * and enable writes first.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts are
	 * addressed as (page number << page_bits) + offset-within-page
	 * rather than by flat byte offset. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Swap the data into the order the write register expects
	 * (mirror of the be32_to_cpu() in the read path). */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3187
/* Identify the attached flash/EEPROM part and cache its descriptor in
 * bp->flash_info.
 *
 * If NVM_CFG1 bit 30 is set, boot code already reconfigured the flash
 * interface, so match flash_table entries by their config1 strap bits.
 * Otherwise match on the raw hardware strapping (bit 23 selects the
 * backup strap set) and program the part's config/write registers here.
 * Also derives bp->flash_size from shared-memory config when present.
 * Returns 0 on success or -ENODEV for an unrecognized part.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap set is in effect. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops fall through with j == entry_count on no match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size advertised in shared memory; fall back to
	 * the table's total_size when the field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3265
3266static int
3267bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3268 int buf_size)
3269{
3270 int rc = 0;
3271 u32 cmd_flags, offset32, len32, extra;
3272
3273 if (buf_size == 0)
3274 return 0;
3275
3276 /* Request access to the flash interface. */
3277 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3278 return rc;
3279
3280 /* Enable access to flash interface */
3281 bnx2_enable_nvram_access(bp);
3282
3283 len32 = buf_size;
3284 offset32 = offset;
3285 extra = 0;
3286
3287 cmd_flags = 0;
3288
3289 if (offset32 & 3) {
3290 u8 buf[4];
3291 u32 pre_len;
3292
3293 offset32 &= ~3;
3294 pre_len = 4 - (offset & 3);
3295
3296 if (pre_len >= len32) {
3297 pre_len = len32;
3298 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3299 BNX2_NVM_COMMAND_LAST;
3300 }
3301 else {
3302 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3303 }
3304
3305 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3306
3307 if (rc)
3308 return rc;
3309
3310 memcpy(ret_buf, buf + (offset & 3), pre_len);
3311
3312 offset32 += 4;
3313 ret_buf += pre_len;
3314 len32 -= pre_len;
3315 }
3316 if (len32 & 3) {
3317 extra = 4 - (len32 & 3);
3318 len32 = (len32 + 4) & ~3;
3319 }
3320
3321 if (len32 == 4) {
3322 u8 buf[4];
3323
3324 if (cmd_flags)
3325 cmd_flags = BNX2_NVM_COMMAND_LAST;
3326 else
3327 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3328 BNX2_NVM_COMMAND_LAST;
3329
3330 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3331
3332 memcpy(ret_buf, buf, 4 - extra);
3333 }
3334 else if (len32 > 0) {
3335 u8 buf[4];
3336
3337 /* Read the first word. */
3338 if (cmd_flags)
3339 cmd_flags = 0;
3340 else
3341 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3342
3343 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3344
3345 /* Advance to the next dword. */
3346 offset32 += 4;
3347 ret_buf += 4;
3348 len32 -= 4;
3349
3350 while (len32 > 4 && rc == 0) {
3351 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3352
3353 /* Advance to the next dword. */
3354 offset32 += 4;
3355 ret_buf += 4;
3356 len32 -= 4;
3357 }
3358
3359 if (rc)
3360 return rc;
3361
3362 cmd_flags = BNX2_NVM_COMMAND_LAST;
3363 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3364
3365 memcpy(ret_buf, buf, 4 - extra);
3366 }
3367
3368 /* Disable access to flash interface */
3369 bnx2_disable_nvram_access(bp);
3370
3371 bnx2_release_nvram_lock(bp);
3372
3373 return rc;
3374}
3375
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned head/tail bytes are handled read-modify-write: the
 * bracketing dwords are read first and merged with the payload into
 * align_buf.  The write then proceeds one flash page per loop
 * iteration; for non-buffered parts each page is read into
 * flash_buffer, erased, and rewritten with the preserved bytes kept
 * around the new data.  Returns 0 or a negative errno.
 *
 * NOTE(review): error paths that jump to nvram_write_end from inside
 * the page loop do so while still holding the NVRAM lock and access
 * enables — looks like a lock leak on hardware timeout; confirm.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Round down to a dword boundary and fetch the dword
		 * containing the unaligned head bytes. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Fetch the dword containing the unaligned tail bytes. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build a fully aligned image: preserved head bytes,
		 * payload, preserved tail bytes. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (bp->flash_info->buffered == 0) {
		/* Page bounce buffer; 264 is presumably the largest
		 * non-buffered page_size in flash_table -- TODO confirm. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* One flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3555
3556static int
3557bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3558{
3559 u32 val;
3560 int i, rc = 0;
3561
3562 /* Wait for the current PCI transaction to complete before
3563 * issuing a reset. */
3564 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3565 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3566 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3567 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3568 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3569 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3570 udelay(5);
3571
Michael Chanb090ae22006-01-23 16:07:10 -08003572 /* Wait for the firmware to tell us it is ok to issue a reset. */
3573 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3574
Michael Chanb6016b72005-05-26 13:03:09 -07003575 /* Deposit a driver reset signature so the firmware knows that
3576 * this is a soft reset. */
Michael Chane3648b32005-11-04 08:51:21 -08003577 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
Michael Chanb6016b72005-05-26 13:03:09 -07003578 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3579
Michael Chanb6016b72005-05-26 13:03:09 -07003580 /* Do a dummy read to force the chip to complete all current transaction
3581 * before we issue a reset. */
3582 val = REG_RD(bp, BNX2_MISC_ID);
3583
Michael Chan234754d2006-11-19 14:11:41 -08003584 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3585 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3586 REG_RD(bp, BNX2_MISC_COMMAND);
3587 udelay(5);
Michael Chanb6016b72005-05-26 13:03:09 -07003588
Michael Chan234754d2006-11-19 14:11:41 -08003589 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3590 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
Michael Chanb6016b72005-05-26 13:03:09 -07003591
Michael Chan234754d2006-11-19 14:11:41 -08003592 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
Michael Chanb6016b72005-05-26 13:03:09 -07003593
Michael Chan234754d2006-11-19 14:11:41 -08003594 } else {
3595 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3596 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3597 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3598
3599 /* Chip reset. */
3600 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3601
3602 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3603 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3604 current->state = TASK_UNINTERRUPTIBLE;
3605 schedule_timeout(HZ / 50);
Michael Chanb6016b72005-05-26 13:03:09 -07003606 }
Michael Chanb6016b72005-05-26 13:03:09 -07003607
Michael Chan234754d2006-11-19 14:11:41 -08003608 /* Reset takes approximate 30 usec */
3609 for (i = 0; i < 10; i++) {
3610 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3611 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3612 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3613 break;
3614 udelay(10);
3615 }
3616
3617 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3618 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3619 printk(KERN_ERR PFX "Chip reset did not complete\n");
3620 return -EBUSY;
3621 }
Michael Chanb6016b72005-05-26 13:03:09 -07003622 }
3623
3624 /* Make sure byte swapping is properly configured. */
3625 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3626 if (val != 0x01020304) {
3627 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3628 return -ENODEV;
3629 }
3630
Michael Chanb6016b72005-05-26 13:03:09 -07003631 /* Wait for the firmware to finish its initialization. */
Michael Chanb090ae22006-01-23 16:07:10 -08003632 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3633 if (rc)
3634 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003635
3636 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3637 /* Adjust the voltage regular to two steps lower. The default
3638 * of this register is 0x0000000e. */
3639 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3640
3641 /* Remove bad rbuf memory from the free pool. */
3642 rc = bnx2_alloc_bad_rbuf(bp);
3643 }
3644
3645 return rc;
3646}
3647
3648static int
3649bnx2_init_chip(struct bnx2 *bp)
3650{
3651 u32 val;
Michael Chanb090ae22006-01-23 16:07:10 -08003652 int rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003653
3654 /* Make sure the interrupt is not active. */
3655 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3656
3657 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3658 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3659#ifdef __BIG_ENDIAN
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003660 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003661#endif
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003662 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
Michael Chanb6016b72005-05-26 13:03:09 -07003663 DMA_READ_CHANS << 12 |
3664 DMA_WRITE_CHANS << 16;
3665
3666 val |= (0x2 << 20) | (1 << 11);
3667
Michael Chandda1e392006-01-23 16:08:14 -08003668 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
Michael Chanb6016b72005-05-26 13:03:09 -07003669 val |= (1 << 23);
3670
3671 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3672 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3673 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3674
3675 REG_WR(bp, BNX2_DMA_CONFIG, val);
3676
3677 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3678 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3679 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3680 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3681 }
3682
3683 if (bp->flags & PCIX_FLAG) {
3684 u16 val16;
3685
3686 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3687 &val16);
3688 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3689 val16 & ~PCI_X_CMD_ERO);
3690 }
3691
3692 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3693 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3694 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3695 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3696
3697 /* Initialize context mapping and zero out the quick contexts. The
3698 * context block must have already been enabled. */
Michael Chan59b47d82006-11-19 14:10:45 -08003699 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3700 bnx2_init_5709_context(bp);
3701 else
3702 bnx2_init_context(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07003703
Michael Chanfba9fe92006-06-12 22:21:25 -07003704 if ((rc = bnx2_init_cpus(bp)) != 0)
3705 return rc;
3706
Michael Chanb6016b72005-05-26 13:03:09 -07003707 bnx2_init_nvram(bp);
3708
3709 bnx2_set_mac_addr(bp);
3710
3711 val = REG_RD(bp, BNX2_MQ_CONFIG);
3712 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3713 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
Michael Chan68c9f752007-04-24 15:35:53 -07003714 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3715 val |= BNX2_MQ_CONFIG_HALT_DIS;
3716
Michael Chanb6016b72005-05-26 13:03:09 -07003717 REG_WR(bp, BNX2_MQ_CONFIG, val);
3718
3719 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3720 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3721 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3722
3723 val = (BCM_PAGE_BITS - 8) << 24;
3724 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3725
3726 /* Configure page size. */
3727 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3728 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3729 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3730 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3731
3732 val = bp->mac_addr[0] +
3733 (bp->mac_addr[1] << 8) +
3734 (bp->mac_addr[2] << 16) +
3735 bp->mac_addr[3] +
3736 (bp->mac_addr[4] << 8) +
3737 (bp->mac_addr[5] << 16);
3738 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3739
3740 /* Program the MTU. Also include 4 bytes for CRC32. */
3741 val = bp->dev->mtu + ETH_HLEN + 4;
3742 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3743 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3744 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3745
3746 bp->last_status_idx = 0;
3747 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3748
3749 /* Set up how to generate a link change interrupt. */
3750 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3751
3752 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3753 (u64) bp->status_blk_mapping & 0xffffffff);
3754 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3755
3756 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3757 (u64) bp->stats_blk_mapping & 0xffffffff);
3758 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3759 (u64) bp->stats_blk_mapping >> 32);
3760
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003761 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
Michael Chanb6016b72005-05-26 13:03:09 -07003762 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3763
3764 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3765 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3766
3767 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3768 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3769
3770 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3771
3772 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3773
3774 REG_WR(bp, BNX2_HC_COM_TICKS,
3775 (bp->com_ticks_int << 16) | bp->com_ticks);
3776
3777 REG_WR(bp, BNX2_HC_CMD_TICKS,
3778 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3779
3780 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3781 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3782
3783 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
Michael Chan8e6a72c2007-05-03 13:24:48 -07003784 val = BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07003785 else {
Michael Chan8e6a72c2007-05-03 13:24:48 -07003786 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
3787 BNX2_HC_CONFIG_COLLECT_STATS;
Michael Chanb6016b72005-05-26 13:03:09 -07003788 }
3789
Michael Chan8e6a72c2007-05-03 13:24:48 -07003790 if (bp->flags & ONE_SHOT_MSI_FLAG)
3791 val |= BNX2_HC_CONFIG_ONE_SHOT;
3792
3793 REG_WR(bp, BNX2_HC_CONFIG, val);
3794
Michael Chanb6016b72005-05-26 13:03:09 -07003795 /* Clear internal stats counters. */
3796 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3797
Michael Chanda3e4fb2007-05-03 13:24:23 -07003798 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
Michael Chanb6016b72005-05-26 13:03:09 -07003799
Michael Chane29054f2006-01-23 16:06:06 -08003800 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3801 BNX2_PORT_FEATURE_ASF_ENABLED)
3802 bp->flags |= ASF_ENABLE_FLAG;
3803
Michael Chanb6016b72005-05-26 13:03:09 -07003804 /* Initialize the receive filter. */
3805 bnx2_set_rx_mode(bp->dev);
3806
Michael Chanb090ae22006-01-23 16:07:10 -08003807 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3808 0);
Michael Chanb6016b72005-05-26 13:03:09 -07003809
3810 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3811 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3812
3813 udelay(20);
3814
Michael Chanbf5295b2006-03-23 01:11:56 -08003815 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3816
Michael Chanb090ae22006-01-23 16:07:10 -08003817 return rc;
Michael Chanb6016b72005-05-26 13:03:09 -07003818}
3819
/* Program the TX L2 context for connection 'cid'.
 * Selects the 5709 (Xinan) or 5706/5708 context-register offsets, then
 * writes the context type, command type (with a BD-count field of 8 in
 * bits 23:16), and the 64-bit TX BD ring base address split into
 * high/low 32-bit halves.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	/* The 5709 uses a different context layout than 5706/5708. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	/* 8 << 16 places a value of 8 in the context's 23:16 field;
	 * NOTE(review): presumably the max BD count per packet — confirm
	 * against the chip documentation.
	 */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* TX BD ring DMA base address, high then low 32 bits. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
Michael Chanb6016b72005-05-26 13:03:09 -07003848
/* Initialize the TX ring software state and hand the ring to the chip.
 * Resets producer/consumer indices, points the last descriptor back at
 * the ring base (making the BD chain circular), caches the mailbox
 * doorbell addresses, and programs the TX context via
 * bnx2_init_tx_context().
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Last BD in the page chains back to the start of the ring. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Cache the mailbox addresses used to ring the TX doorbell. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3873
/* Initialize the RX BD ring(s): size the receive buffers from the MTU,
 * pre-fill every descriptor page and chain the pages into a circle,
 * program the RX context with the ring base address, then post up to
 * rx_ring_size receive skbs and tell the chip via the mailbox.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Fill each descriptor page; the final BD of each page is a
	 * chain pointer to the next page (wrapping to page 0 at the end).
	 */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX L2 context: BD-chain type plus the ring base. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post receive buffers; stop early (best effort) on alloc failure. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Ring the doorbell with the new producer index and byte seq. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3933
3934static void
Michael Chan13daffa2006-03-20 17:49:20 -08003935bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3936{
3937 u32 num_rings, max;
3938
3939 bp->rx_ring_size = size;
3940 num_rings = 1;
3941 while (size > MAX_RX_DESC_CNT) {
3942 size -= MAX_RX_DESC_CNT;
3943 num_rings++;
3944 }
3945 /* round to next power of 2 */
3946 max = MAX_RX_RINGS;
3947 while ((max & num_rings) == 0)
3948 max >>= 1;
3949
3950 if (num_rings != max)
3951 max <<= 1;
3952
3953 bp->rx_max_ring = max;
3954 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3955}
3956
/* Release every pending TX skb and its DMA mappings.
 * Walks the TX buffer ring; for each posted skb it unmaps the linear
 * head, then the page-fragment BDs that follow it in the ring, frees
 * the skb, and skips past all BDs the packet consumed.  Safe to call
 * before the ring is allocated (returns immediately).
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	/* No i++ here: 'i' advances by the whole packet's BD count below. */
	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Linear part of the skb occupies the first BD. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Each page fragment occupies one following BD. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}

}
3993
/* Release every posted RX skb and its DMA mapping.
 * Walks the RX buffer ring, unmapping and freeing each slot that holds
 * an skb.  Safe to call before the ring is allocated.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		/* Slot was never filled (alloc failure) or already freed. */
		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}
4017
/* Release all TX and RX skbs and their DMA mappings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4024
4025static int
4026bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4027{
4028 int rc;
4029
4030 rc = bnx2_reset_chip(bp, reset_code);
4031 bnx2_free_skbs(bp);
4032 if (rc)
4033 return rc;
4034
Michael Chanfba9fe92006-06-12 22:21:25 -07004035 if ((rc = bnx2_init_chip(bp)) != 0)
4036 return rc;
4037
Michael Chanb6016b72005-05-26 13:03:09 -07004038 bnx2_init_tx_ring(bp);
4039 bnx2_init_rx_ring(bp);
4040 return 0;
4041}
4042
4043static int
4044bnx2_init_nic(struct bnx2 *bp)
4045{
4046 int rc;
4047
4048 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4049 return rc;
4050
Michael Chan80be4432006-11-19 14:07:28 -08004051 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004052 bnx2_init_phy(bp);
Michael Chan80be4432006-11-19 14:07:28 -08004053 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004054 bnx2_set_link(bp);
4055 return 0;
4056}
4057
/* Self-test: walk a table of registers and verify that the read/write
 * bits (rw_mask) can be cleared and set, while the read-only bits
 * (ro_mask) keep their original value.  Each register is restored to
 * its saved value afterwards, including on failure.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16 offset;
		u16 flags;
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* table terminator */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Writing 0 must clear all writable bits... */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* ...and must not disturb the read-only bits. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all-ones must set all writable bits... */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		/* ...again without disturbing the read-only bits. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure, then stop. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4228
4229static int
4230bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4231{
Arjan van de Venf71e1302006-03-03 21:33:57 -05004232 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07004233 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4234 int i;
4235
4236 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4237 u32 offset;
4238
4239 for (offset = 0; offset < size; offset += 4) {
4240
4241 REG_WR_IND(bp, start + offset, test_pattern[i]);
4242
4243 if (REG_RD_IND(bp, start + offset) !=
4244 test_pattern[i]) {
4245 return -ENODEV;
4246 }
4247 }
4248 }
4249 return 0;
4250}
4251
4252static int
4253bnx2_test_memory(struct bnx2 *bp)
4254{
4255 int ret = 0;
4256 int i;
Michael Chan5bae30c2007-05-03 13:18:46 -07004257 static struct mem_entry {
Michael Chanb6016b72005-05-26 13:03:09 -07004258 u32 offset;
4259 u32 len;
Michael Chan5bae30c2007-05-03 13:18:46 -07004260 } mem_tbl_5706[] = {
Michael Chanb6016b72005-05-26 13:03:09 -07004261 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08004262 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07004263 { 0xe0000, 0x4000 },
4264 { 0x120000, 0x4000 },
4265 { 0x1a0000, 0x4000 },
4266 { 0x160000, 0x4000 },
4267 { 0xffffffff, 0 },
Michael Chan5bae30c2007-05-03 13:18:46 -07004268 },
4269 mem_tbl_5709[] = {
4270 { 0x60000, 0x4000 },
4271 { 0xa0000, 0x3000 },
4272 { 0xe0000, 0x4000 },
4273 { 0x120000, 0x4000 },
4274 { 0x1a0000, 0x4000 },
4275 { 0xffffffff, 0 },
Michael Chanb6016b72005-05-26 13:03:09 -07004276 };
Michael Chan5bae30c2007-05-03 13:18:46 -07004277 struct mem_entry *mem_tbl;
4278
4279 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4280 mem_tbl = mem_tbl_5709;
4281 else
4282 mem_tbl = mem_tbl_5706;
Michael Chanb6016b72005-05-26 13:03:09 -07004283
4284 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4285 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4286 mem_tbl[i].len)) != 0) {
4287 return ret;
4288 }
4289 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004290
Michael Chanb6016b72005-05-26 13:03:09 -07004291 return ret;
4292}
4293
Michael Chanbc5a0692006-01-23 16:13:22 -08004294#define BNX2_MAC_LOOPBACK 0
4295#define BNX2_PHY_LOOPBACK 1
4296
/* Run one loopback self-test in MAC or PHY loopback mode.
 * Builds a 1514-byte test frame (our MAC address, zero type/len area,
 * then an incrementing byte pattern), posts it on the TX ring, forces
 * coalescing so the status block updates, and then verifies that
 * exactly one frame came back on the RX ring with no error flags, the
 * expected length, and an intact payload.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM on skb
 * allocation failure, or -ENODEV if the frame did not loop back intact.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Test frame: dest MAC = own address, bytes 6-13 zeroed, payload
	 * from byte 14 on is the low byte of its own offset.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalescing cycle (no interrupt) so the status block
	 * reflects the current ring state before we sample it.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Post the frame as a single start+end BD on the TX ring. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Second forced coalescing cycle to pick up TX/RX completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX completion must have consumed our one BD... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames must have arrived on RX. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip writes an l2_fhdr status header ahead of the frame. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4415
Michael Chanbc5a0692006-01-23 16:13:22 -08004416#define BNX2_MAC_LOOPBACK_FAILED 1
4417#define BNX2_PHY_LOOPBACK_FAILED 2
4418#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4419 BNX2_PHY_LOOPBACK_FAILED)
4420
/* Run both loopback self-tests (MAC and PHY) after a fresh NIC reset
 * and PHY init.  Returns 0, or a bitmask of BNX2_MAC_LOOPBACK_FAILED /
 * BNX2_PHY_LOOPBACK_FAILED; returns both bits if the device is down.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	/* Start from a clean chip and PHY state before testing. */
	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
4439
Michael Chanb6016b72005-05-26 13:03:09 -07004440#define NVRAM_SIZE 0x200
4441#define CRC32_RESIDUAL 0xdebb20e3
4442
/* NVRAM self-test: check the magic value at offset 0, then read
 * NVRAM_SIZE bytes starting at 0x100 and verify the CRC32 of each of
 * the two 0x100-byte halves leaves the standard CRC residual.
 * Returns 0 on success, a read error, or -ENODEV on bad magic/CRC.
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	/* Each half covers its own CRC, so a valid block yields the
	 * constant CRC32 residual rather than a stored checksum match.
	 */
	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
4477
/* Link self-test: read the PHY's BMSR (under phy_lock, with the BMSR1
 * access window enabled) and report link state.
 * Returns 0 if link is up, -ENODEV otherwise.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Read twice — NOTE(review): presumably because the link-status
	 * bit is latched and the second read returns current state;
	 * confirm against the PHY datasheet.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
4495
/* Interrupt self-test: sample the status index, force a coalescing
 * cycle (which generates an interrupt), and poll up to ~100ms for the
 * status index to change — proof that the interrupt path updated the
 * status block.  Returns 0 on success, -ENODEV if the device is down
 * or the index never changed.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	/* Poll 10 times at 10ms intervals for the index to move. */
	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
4525
/* Periodic SerDes handling for the 5706 (called from bnx2_timer).
 * While autoneg has not produced a link, checks the PHY's signal-detect
 * and config-reception status via shadow registers 0x1c/0x17/0x15; if a
 * signal is present but the partner sends no config (i.e. the partner
 * is in forced mode), falls back to forced 1000/full "parallel detect".
 * Once link is up in parallel-detect mode, re-enables autoneg as soon
 * as the partner starts sending config.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Autoneg recently (re)started — give it more timer ticks. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Shadow register accesses: select via 0x1c/0x17,
			 * read back via 0x1c/0x15 (double read on 0x15).
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is forced — force 1G/full too. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link up in parallel-detect mode: if the partner now
		 * sends config, go back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4580
/* Periodic SerDes handling for the 5708 (called from bnx2_timer).
 * Only relevant for 2.5G-capable PHYs.  While autoneg has not produced
 * a link, alternates between forced 2.5G mode (short timeout) and
 * autoneg (normal interval, with two grace ticks) to find a partner
 * that only supports one of the two modes.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still within the autoneg grace period. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg failed so far — try forced 2.5G briefly. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G failed — back to autoneg with two
			 * timer ticks of grace before re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4610
/* Periodic driver timer.  Sends the firmware keep-alive pulse, pulls
 * the firmware RX-drop counter into the stats block, and runs the
 * chip-specific SerDes state machine.  Skipped (but still rearmed)
 * while interrupts are disabled via intr_sem; does nothing once the
 * device is down.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are (temporarily) disabled — just rearm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat: firmware watches this sequence number advance. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4638
Michael Chan8e6a72c2007-05-03 13:24:48 -07004639static int
4640bnx2_request_irq(struct bnx2 *bp)
4641{
4642 struct net_device *dev = bp->dev;
4643 int rc = 0;
4644
4645 if (bp->flags & USING_MSI_FLAG) {
4646 irq_handler_t fn = bnx2_msi;
4647
4648 if (bp->flags & ONE_SHOT_MSI_FLAG)
4649 fn = bnx2_msi_1shot;
4650
4651 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4652 } else
4653 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4654 IRQF_SHARED, dev->name, dev);
4655 return rc;
4656}
4657
4658static void
4659bnx2_free_irq(struct bnx2 *bp)
4660{
4661 struct net_device *dev = bp->dev;
4662
4663 if (bp->flags & USING_MSI_FLAG) {
4664 free_irq(bp->pdev->irq, dev);
4665 pci_disable_msi(bp->pdev);
4666 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4667 } else
4668 free_irq(bp->pdev->irq, dev);
4669}
4670
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* No link yet; bnx2_set_link (elsewhere) raises the carrier. */
	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Allocate rings/status/stats blocks before any IRQ can fire. */
	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Try MSI unless the module parameter disabled it; one-shot MSI
	 * is only enabled on the 5709. */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind in reverse order of acquisition. */
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			/* bnx2_free_irq() also disables MSI and clears the
			 * MSI flags, so the re-request below uses INTx. */
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4752
/* Work-queue handler scheduled by bnx2_tx_timeout(): quiesce the netif,
 * reinitialize the chip and restart.  in_reset_task is polled by
 * bnx2_close(), which cannot call flush_scheduled_work() while holding
 * rtnl_lock (see the comment there).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* intr_sem is raised (cf. bnx2_open() which clears it to 0);
	 * NOTE(review): presumably the interrupt path decrements/clears
	 * it after the restart — confirm against bnx2_interrupt(). */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4770
4771static void
4772bnx2_tx_timeout(struct net_device *dev)
4773{
Michael Chan972ec0d2006-01-23 16:12:43 -08004774 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004775
4776 /* This allows the netif to be shutdown gracefully before resetting */
4777 schedule_work(&bp->reset_task);
4778}
4779
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install the new VLAN group.  RX/TX are quiesced first so the RX path
 * never sees a half-updated vlgrp, then the RX mode is reprogrammed to
 * match the new VLAN configuration.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
4795
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Hard-start transmit: map the skb (head + page frags) onto a chain of
 * TX buffer descriptors, fill in checksum/VLAN/LSO flags, and ring the
 * doorbell registers.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* Queue should have been stopped before the ring filled up;
	 * hitting this path indicates a driver bug. */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Hardware VLAN tag insertion: tag goes in the upper 16 bits. */
	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: encode the TCP header offset
			 * (relative to a standard IPv6 header) into the
			 * BD flag/mss fields, 3 bits at a time. */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TSO over IPv4: the headers below are rewritten,
			 * so a cloned header block must be un-shared first. */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prime IP total length and TCP pseudo-header
			 * checksum for the per-segment hardware rewrite. */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* IP and TCP option lengths, in 32-bit words. */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First BD carries the linear part and the START flag. */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* END flag goes on the last BD written (first BD if no frags). */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: producer index, then byte-sequence count. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when the ring is nearly full; re-wake if the
	 * completion path freed enough room in the meantime (avoids a
	 * race with bnx2_tx_int()). */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
4934
/* Called with rtnl_lock */
/* Shut the interface down: wait out any in-flight reset_task, stop the
 * datapath and timer, hand the chip back to the bootcode with a reset
 * code reflecting the WoL configuration, release IRQ/buffers/memory,
 * and drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Tell the bootcode whether to keep the link/WoL alive. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4966
/* Read a 64-bit hardware statistics counter split into _hi/_lo words.
 * net_device_stats fields are unsigned long, so on 64-bit hosts both
 * halves are combined while 32-bit hosts only report the low word
 * (GET_NET_STATS64 is never selected there, avoiding an out-of-range
 * shift).  Both expansions are fully parenthesized so the macros act
 * as single expressions in any surrounding context.
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)	\
	((unsigned long) (ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4979
/* net_device get_stats hook: translate the DMA'd hardware statistics
 * block into struct net_device_stats.  Returns cached (possibly stale)
 * values if the stats block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as 0 on 5706 and 5708 A0;
	 * NOTE(review): presumably the counter is not reliable on those
	 * chips — confirm against the chip errata. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Includes drops counted by firmware (mirrored by bnx2_timer()). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5055
5056/* All ethtool functions called with rtnl_lock */
5057
5058static int
5059bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5060{
Michael Chan972ec0d2006-01-23 16:12:43 -08005061 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005062
5063 cmd->supported = SUPPORTED_Autoneg;
5064 if (bp->phy_flags & PHY_SERDES_FLAG) {
5065 cmd->supported |= SUPPORTED_1000baseT_Full |
5066 SUPPORTED_FIBRE;
Michael Chan605a9e22007-05-03 13:23:13 -07005067 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5068 cmd->supported |= SUPPORTED_2500baseX_Full;
Michael Chanb6016b72005-05-26 13:03:09 -07005069
5070 cmd->port = PORT_FIBRE;
5071 }
5072 else {
5073 cmd->supported |= SUPPORTED_10baseT_Half |
5074 SUPPORTED_10baseT_Full |
5075 SUPPORTED_100baseT_Half |
5076 SUPPORTED_100baseT_Full |
5077 SUPPORTED_1000baseT_Full |
5078 SUPPORTED_TP;
5079
5080 cmd->port = PORT_TP;
5081 }
5082
5083 cmd->advertising = bp->advertising;
5084
5085 if (bp->autoneg & AUTONEG_SPEED) {
5086 cmd->autoneg = AUTONEG_ENABLE;
5087 }
5088 else {
5089 cmd->autoneg = AUTONEG_DISABLE;
5090 }
5091
5092 if (netif_carrier_ok(dev)) {
5093 cmd->speed = bp->line_speed;
5094 cmd->duplex = bp->duplex;
5095 }
5096 else {
5097 cmd->speed = -1;
5098 cmd->duplex = -1;
5099 }
5100
5101 cmd->transceiver = XCVR_INTERNAL;
5102 cmd->phy_address = bp->phy_addr;
5103
5104 return 0;
5105}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005106
/* ethtool set_settings hook: validate the requested autoneg/speed/
 * duplex combination against the PHY type (copper vs SerDes, 2.5G
 * capability), then commit the request and reprogram the PHY.
 * All validation happens on locals so bp is untouched on -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G advertisement requires a capable SerDes PHY;
			 * note that `advertising` is left unchanged here. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000-half is not supported by the hardware. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the PHY
			 * type supports. */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes supports only 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1000 is not allowed on copper. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Commit the validated request and renegotiate. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5184
5185static void
5186bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5187{
Michael Chan972ec0d2006-01-23 16:12:43 -08005188 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005189
5190 strcpy(info->driver, DRV_MODULE_NAME);
5191 strcpy(info->version, DRV_MODULE_VERSION);
5192 strcpy(info->bus_info, pci_name(bp->pdev));
5193 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5194 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5195 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08005196 info->fw_version[1] = info->fw_version[3] = '.';
5197 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07005198}
5199
/* Size in bytes of the register dump produced by bnx2_get_regs()
 * (the chip's 32 KB register window). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: tells ethtool how big a buffer to
 * allocate before calling bnx2_get_regs(). */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5207
5208static void
5209bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5210{
5211 u32 *p = _p, i, offset;
5212 u8 *orig_p = _p;
5213 struct bnx2 *bp = netdev_priv(dev);
5214 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5215 0x0800, 0x0880, 0x0c00, 0x0c10,
5216 0x0c30, 0x0d08, 0x1000, 0x101c,
5217 0x1040, 0x1048, 0x1080, 0x10a4,
5218 0x1400, 0x1490, 0x1498, 0x14f0,
5219 0x1500, 0x155c, 0x1580, 0x15dc,
5220 0x1600, 0x1658, 0x1680, 0x16d8,
5221 0x1800, 0x1820, 0x1840, 0x1854,
5222 0x1880, 0x1894, 0x1900, 0x1984,
5223 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5224 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5225 0x2000, 0x2030, 0x23c0, 0x2400,
5226 0x2800, 0x2820, 0x2830, 0x2850,
5227 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5228 0x3c00, 0x3c94, 0x4000, 0x4010,
5229 0x4080, 0x4090, 0x43c0, 0x4458,
5230 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5231 0x4fc0, 0x5010, 0x53c0, 0x5444,
5232 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5233 0x5fc0, 0x6000, 0x6400, 0x6428,
5234 0x6800, 0x6848, 0x684c, 0x6860,
5235 0x6888, 0x6910, 0x8000 };
5236
5237 regs->version = 0;
5238
5239 memset(p, 0, BNX2_REGDUMP_LEN);
5240
5241 if (!netif_running(bp->dev))
5242 return;
5243
5244 i = 0;
5245 offset = reg_boundaries[0];
5246 p += offset;
5247 while (offset < BNX2_REGDUMP_LEN) {
5248 *p++ = REG_RD(bp, offset);
5249 offset += 4;
5250 if (offset == reg_boundaries[i + 1]) {
5251 offset = reg_boundaries[i + 2];
5252 p = (u32 *) (orig_p + offset);
5253 i += 2;
5254 }
5255 }
5256}
5257
Michael Chanb6016b72005-05-26 13:03:09 -07005258static void
5259bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5260{
Michael Chan972ec0d2006-01-23 16:12:43 -08005261 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005262
5263 if (bp->flags & NO_WOL_FLAG) {
5264 wol->supported = 0;
5265 wol->wolopts = 0;
5266 }
5267 else {
5268 wol->supported = WAKE_MAGIC;
5269 if (bp->wol)
5270 wol->wolopts = WAKE_MAGIC;
5271 else
5272 wol->wolopts = 0;
5273 }
5274 memset(&wol->sopass, 0, sizeof(wol->sopass));
5275}
5276
5277static int
5278bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5279{
Michael Chan972ec0d2006-01-23 16:12:43 -08005280 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005281
5282 if (wol->wolopts & ~WAKE_MAGIC)
5283 return -EINVAL;
5284
5285 if (wol->wolopts & WAKE_MAGIC) {
5286 if (bp->flags & NO_WOL_FLAG)
5287 return -EINVAL;
5288
5289 bp->wol = 1;
5290 }
5291 else {
5292 bp->wol = 0;
5293 }
5294 return 0;
5295}
5296
/* ethtool nway_reset hook: restart autonegotiation.  On SerDes PHYs
 * the link is first forced down (loopback) so the partner notices the
 * renegotiation, and the SerDes poll timer is re-armed.  Only valid
 * when speed autonegotiation is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be
		 * called under a spinlock. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Re-arm the SerDes autoneg timeout handling. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a new autonegotiation cycle. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5331
5332static int
5333bnx2_get_eeprom_len(struct net_device *dev)
5334{
Michael Chan972ec0d2006-01-23 16:12:43 -08005335 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005336
Michael Chan1122db72006-01-23 16:11:42 -08005337 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07005338 return 0;
5339
Michael Chan1122db72006-01-23 16:11:42 -08005340 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07005341}
5342
5343static int
5344bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5345 u8 *eebuf)
5346{
Michael Chan972ec0d2006-01-23 16:12:43 -08005347 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005348 int rc;
5349
John W. Linville1064e942005-11-10 12:58:24 -08005350 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005351
5352 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5353
5354 return rc;
5355}
5356
5357static int
5358bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5359 u8 *eebuf)
5360{
Michael Chan972ec0d2006-01-23 16:12:43 -08005361 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005362 int rc;
5363
John W. Linville1064e942005-11-10 12:58:24 -08005364 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07005365
5366 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5367
5368 return rc;
5369}
5370
5371static int
5372bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5373{
Michael Chan972ec0d2006-01-23 16:12:43 -08005374 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005375
5376 memset(coal, 0, sizeof(struct ethtool_coalesce));
5377
5378 coal->rx_coalesce_usecs = bp->rx_ticks;
5379 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5380 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5381 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5382
5383 coal->tx_coalesce_usecs = bp->tx_ticks;
5384 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5385 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5386 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5387
5388 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5389
5390 return 0;
5391}
5392
5393static int
5394bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5395{
Michael Chan972ec0d2006-01-23 16:12:43 -08005396 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005397
5398 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5399 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5400
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005401 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
Michael Chanb6016b72005-05-26 13:03:09 -07005402 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5403
5404 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5405 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5406
5407 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5408 if (bp->rx_quick_cons_trip_int > 0xff)
5409 bp->rx_quick_cons_trip_int = 0xff;
5410
5411 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5412 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5413
5414 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5415 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5416
5417 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5418 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5419
5420 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5421 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5422 0xff;
5423
5424 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5425 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5426 bp->stats_ticks &= 0xffff00;
5427
5428 if (netif_running(bp->dev)) {
5429 bnx2_netif_stop(bp);
5430 bnx2_init_nic(bp);
5431 bnx2_netif_start(bp);
5432 }
5433
5434 return 0;
5435}
5436
5437static void
5438bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5439{
Michael Chan972ec0d2006-01-23 16:12:43 -08005440 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005441
Michael Chan13daffa2006-03-20 17:49:20 -08005442 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
Michael Chanb6016b72005-05-26 13:03:09 -07005443 ering->rx_mini_max_pending = 0;
5444 ering->rx_jumbo_max_pending = 0;
5445
5446 ering->rx_pending = bp->rx_ring_size;
5447 ering->rx_mini_pending = 0;
5448 ering->rx_jumbo_pending = 0;
5449
5450 ering->tx_max_pending = MAX_TX_DESC_CNT;
5451 ering->tx_pending = bp->tx_ring_size;
5452}
5453
/* ethtool set_ringparam hook: resize the RX/TX rings.  If the
 * interface is up the chip is reset and all ring memory is freed and
 * reallocated at the new sizes.  TX must leave room for a maximally
 * fragmented skb (> MAX_SKB_FRAGS descriptors).
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			/* NOTE(review): on allocation failure the device
			 * is left stopped with no rings; the caller only
			 * sees the error code. */
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5487
5488static void
5489bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5490{
Michael Chan972ec0d2006-01-23 16:12:43 -08005491 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005492
5493 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5494 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5495 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5496}
5497
5498static int
5499bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5500{
Michael Chan972ec0d2006-01-23 16:12:43 -08005501 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005502
5503 bp->req_flow_ctrl = 0;
5504 if (epause->rx_pause)
5505 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5506 if (epause->tx_pause)
5507 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5508
5509 if (epause->autoneg) {
5510 bp->autoneg |= AUTONEG_FLOW_CTRL;
5511 }
5512 else {
5513 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5514 }
5515
Michael Chanc770a652005-08-25 15:38:39 -07005516 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005517
5518 bnx2_setup_phy(bp);
5519
Michael Chanc770a652005-08-25 15:38:39 -07005520 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005521
5522 return 0;
5523}
5524
5525static u32
5526bnx2_get_rx_csum(struct net_device *dev)
5527{
Michael Chan972ec0d2006-01-23 16:12:43 -08005528 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005529
5530 return bp->rx_csum;
5531}
5532
5533static int
5534bnx2_set_rx_csum(struct net_device *dev, u32 data)
5535{
Michael Chan972ec0d2006-01-23 16:12:43 -08005536 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005537
5538 bp->rx_csum = data;
5539 return 0;
5540}
5541
Michael Chanb11d6212006-06-29 12:31:21 -07005542static int
5543bnx2_set_tso(struct net_device *dev, u32 data)
5544{
Michael Chan4666f872007-05-03 13:22:28 -07005545 struct bnx2 *bp = netdev_priv(dev);
5546
5547 if (data) {
Michael Chanb11d6212006-06-29 12:31:21 -07005548 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
Michael Chan4666f872007-05-03 13:22:28 -07005549 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5550 dev->features |= NETIF_F_TSO6;
5551 } else
5552 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5553 NETIF_F_TSO_ECN);
Michael Chanb11d6212006-06-29 12:31:21 -07005554 return 0;
5555}
5556
Michael Chancea94db2006-06-12 22:16:13 -07005557#define BNX2_NUM_STATS 46
Michael Chanb6016b72005-05-26 13:03:09 -07005558
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005559static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005560 char string[ETH_GSTRING_LEN];
5561} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5562 { "rx_bytes" },
5563 { "rx_error_bytes" },
5564 { "tx_bytes" },
5565 { "tx_error_bytes" },
5566 { "rx_ucast_packets" },
5567 { "rx_mcast_packets" },
5568 { "rx_bcast_packets" },
5569 { "tx_ucast_packets" },
5570 { "tx_mcast_packets" },
5571 { "tx_bcast_packets" },
5572 { "tx_mac_errors" },
5573 { "tx_carrier_errors" },
5574 { "rx_crc_errors" },
5575 { "rx_align_errors" },
5576 { "tx_single_collisions" },
5577 { "tx_multi_collisions" },
5578 { "tx_deferred" },
5579 { "tx_excess_collisions" },
5580 { "tx_late_collisions" },
5581 { "tx_total_collisions" },
5582 { "rx_fragments" },
5583 { "rx_jabbers" },
5584 { "rx_undersize_packets" },
5585 { "rx_oversize_packets" },
5586 { "rx_64_byte_packets" },
5587 { "rx_65_to_127_byte_packets" },
5588 { "rx_128_to_255_byte_packets" },
5589 { "rx_256_to_511_byte_packets" },
5590 { "rx_512_to_1023_byte_packets" },
5591 { "rx_1024_to_1522_byte_packets" },
5592 { "rx_1523_to_9022_byte_packets" },
5593 { "tx_64_byte_packets" },
5594 { "tx_65_to_127_byte_packets" },
5595 { "tx_128_to_255_byte_packets" },
5596 { "tx_256_to_511_byte_packets" },
5597 { "tx_512_to_1023_byte_packets" },
5598 { "tx_1024_to_1522_byte_packets" },
5599 { "tx_1523_to_9022_byte_packets" },
5600 { "rx_xon_frames" },
5601 { "rx_xoff_frames" },
5602 { "tx_xon_frames" },
5603 { "tx_xoff_frames" },
5604 { "rx_mac_ctrl_frames" },
5605 { "rx_filtered_packets" },
5606 { "rx_discards" },
Michael Chancea94db2006-06-12 22:16:13 -07005607 { "rx_fw_discards" },
Michael Chanb6016b72005-05-26 13:03:09 -07005608};
5609
5610#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5611
Arjan van de Venf71e1302006-03-03 21:33:57 -05005612static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005613 STATS_OFFSET32(stat_IfHCInOctets_hi),
5614 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5615 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5616 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5617 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5618 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5619 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5620 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5621 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5622 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5623 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005624 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5625 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5626 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5627 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5628 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5629 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5630 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5631 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5632 STATS_OFFSET32(stat_EtherStatsCollisions),
5633 STATS_OFFSET32(stat_EtherStatsFragments),
5634 STATS_OFFSET32(stat_EtherStatsJabbers),
5635 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5636 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5637 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5638 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5639 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5640 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5641 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5642 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5643 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5644 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5645 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5646 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5647 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5648 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5649 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5650 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5651 STATS_OFFSET32(stat_XonPauseFramesReceived),
5652 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5653 STATS_OFFSET32(stat_OutXonSent),
5654 STATS_OFFSET32(stat_OutXoffSent),
5655 STATS_OFFSET32(stat_MacControlFramesReceived),
5656 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5657 STATS_OFFSET32(stat_IfInMBUFDiscards),
Michael Chancea94db2006-06-12 22:16:13 -07005658 STATS_OFFSET32(stat_FwRxDrop),
Michael Chanb6016b72005-05-26 13:03:09 -07005659};
5660
5661/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5662 * skipped because of errata.
Jeff Garzik6aa20a22006-09-13 13:24:59 -04005663 */
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005664static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
Michael Chanb6016b72005-05-26 13:03:09 -07005665 8,0,8,8,8,8,8,8,8,8,
5666 4,0,4,4,4,4,4,4,4,4,
5667 4,4,4,4,4,4,4,4,4,4,
5668 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005669 4,4,4,4,4,4,
Michael Chanb6016b72005-05-26 13:03:09 -07005670};
5671
Michael Chan5b0c76a2005-11-04 08:45:49 -08005672static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5673 8,0,8,8,8,8,8,8,8,8,
5674 4,4,4,4,4,4,4,4,4,4,
5675 4,4,4,4,4,4,4,4,4,4,
5676 4,4,4,4,4,4,4,4,4,4,
Michael Chancea94db2006-06-12 22:16:13 -07005677 4,4,4,4,4,4,
Michael Chan5b0c76a2005-11-04 08:45:49 -08005678};
5679
Michael Chanb6016b72005-05-26 13:03:09 -07005680#define BNX2_NUM_TESTS 6
5681
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005682static struct {
Michael Chanb6016b72005-05-26 13:03:09 -07005683 char string[ETH_GSTRING_LEN];
5684} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5685 { "register_test (offline)" },
5686 { "memory_test (offline)" },
5687 { "loopback_test (offline)" },
5688 { "nvram_test (online)" },
5689 { "interrupt_test (online)" },
5690 { "link_test (online)" },
5691};
5692
/* ethtool: number of entries written by bnx2_self_test(). */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5698
/* ethtool self-test entry point.  buf[] receives one result per test in
 * the order of bnx2_tests_str_arr (non-zero = failed); any failure also
 * sets ETH_TEST_FL_FAILED in etest->flags.  The offline tests reset the
 * chip and therefore stop/restart traffic.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diagnostic mode
		 * before running the destructive tests. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback returns a bitmask of failed loopback modes. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or just reset if the
		 * interface is down). */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests: safe to run while the device carries traffic. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5754
5755static void
5756bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5757{
5758 switch (stringset) {
5759 case ETH_SS_STATS:
5760 memcpy(buf, bnx2_stats_str_arr,
5761 sizeof(bnx2_stats_str_arr));
5762 break;
5763 case ETH_SS_TEST:
5764 memcpy(buf, bnx2_tests_str_arr,
5765 sizeof(bnx2_tests_str_arr));
5766 break;
5767 }
5768}
5769
/* ethtool: number of statistics written by bnx2_get_ethtool_stats(). */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5775
5776static void
5777bnx2_get_ethtool_stats(struct net_device *dev,
5778 struct ethtool_stats *stats, u64 *buf)
5779{
Michael Chan972ec0d2006-01-23 16:12:43 -08005780 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005781 int i;
5782 u32 *hw_stats = (u32 *) bp->stats_blk;
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005783 u8 *stats_len_arr = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005784
5785 if (hw_stats == NULL) {
5786 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5787 return;
5788 }
5789
Michael Chan5b0c76a2005-11-04 08:45:49 -08005790 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5791 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5792 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5793 (CHIP_ID(bp) == CHIP_ID_5708_A0))
Michael Chanb6016b72005-05-26 13:03:09 -07005794 stats_len_arr = bnx2_5706_stats_len_arr;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005795 else
5796 stats_len_arr = bnx2_5708_stats_len_arr;
Michael Chanb6016b72005-05-26 13:03:09 -07005797
5798 for (i = 0; i < BNX2_NUM_STATS; i++) {
5799 if (stats_len_arr[i] == 0) {
5800 /* skip this counter */
5801 buf[i] = 0;
5802 continue;
5803 }
5804 if (stats_len_arr[i] == 4) {
5805 /* 4-byte counter */
5806 buf[i] = (u64)
5807 *(hw_stats + bnx2_stats_offset_arr[i]);
5808 continue;
5809 }
5810 /* 8-byte counter */
5811 buf[i] = (((u64) *(hw_stats +
5812 bnx2_stats_offset_arr[i])) << 32) +
5813 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5814 }
5815}
5816
/* ethtool phys_id: blink the port LED so the adapter can be identified.
 * "data" is the number of seconds to blink (0 selects 2 seconds: the loop
 * toggles the LED every 500ms, data * 2 times).  Always returns 0.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;	/* original MISC_CFG value, restored on exit */

	if (data == 0)
		data = 2;

	/* Take manual control of the LED. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Alternate between override-only and all-override-bits states. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		/* Stop blinking early if the caller was interrupted. */
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	/* Return LED control to the hardware and restore the saved mode. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5850
Michael Chan4666f872007-05-03 13:22:28 -07005851static int
5852bnx2_set_tx_csum(struct net_device *dev, u32 data)
5853{
5854 struct bnx2 *bp = netdev_priv(dev);
5855
5856 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5857 return (ethtool_op_set_tx_hw_csum(dev, data));
5858 else
5859 return (ethtool_op_set_tx_csum(dev, data));
5860}
5861
Jeff Garzik7282d492006-09-13 14:30:00 -04005862static const struct ethtool_ops bnx2_ethtool_ops = {
Michael Chanb6016b72005-05-26 13:03:09 -07005863 .get_settings = bnx2_get_settings,
5864 .set_settings = bnx2_set_settings,
5865 .get_drvinfo = bnx2_get_drvinfo,
Michael Chan244ac4f2006-03-20 17:48:46 -08005866 .get_regs_len = bnx2_get_regs_len,
5867 .get_regs = bnx2_get_regs,
Michael Chanb6016b72005-05-26 13:03:09 -07005868 .get_wol = bnx2_get_wol,
5869 .set_wol = bnx2_set_wol,
5870 .nway_reset = bnx2_nway_reset,
5871 .get_link = ethtool_op_get_link,
5872 .get_eeprom_len = bnx2_get_eeprom_len,
5873 .get_eeprom = bnx2_get_eeprom,
5874 .set_eeprom = bnx2_set_eeprom,
5875 .get_coalesce = bnx2_get_coalesce,
5876 .set_coalesce = bnx2_set_coalesce,
5877 .get_ringparam = bnx2_get_ringparam,
5878 .set_ringparam = bnx2_set_ringparam,
5879 .get_pauseparam = bnx2_get_pauseparam,
5880 .set_pauseparam = bnx2_set_pauseparam,
5881 .get_rx_csum = bnx2_get_rx_csum,
5882 .set_rx_csum = bnx2_set_rx_csum,
5883 .get_tx_csum = ethtool_op_get_tx_csum,
Michael Chan4666f872007-05-03 13:22:28 -07005884 .set_tx_csum = bnx2_set_tx_csum,
Michael Chanb6016b72005-05-26 13:03:09 -07005885 .get_sg = ethtool_op_get_sg,
5886 .set_sg = ethtool_op_set_sg,
Michael Chanb6016b72005-05-26 13:03:09 -07005887 .get_tso = ethtool_op_get_tso,
Michael Chanb11d6212006-06-29 12:31:21 -07005888 .set_tso = bnx2_set_tso,
Michael Chanb6016b72005-05-26 13:03:09 -07005889 .self_test_count = bnx2_self_test_count,
5890 .self_test = bnx2_self_test,
5891 .get_strings = bnx2_get_strings,
5892 .phys_id = bnx2_phys_id,
5893 .get_stats_count = bnx2_get_stats_count,
5894 .get_ethtool_stats = bnx2_get_ethtool_stats,
John W. Linville24b8e052005-09-12 14:45:08 -07005895 .get_perm_addr = ethtool_op_get_perm_addr,
Michael Chanb6016b72005-05-26 13:03:09 -07005896};
5897
/* Called with rtnl_lock */
/* MII ioctl handler: read/write PHY registers on behalf of userspace.
 * Returns -EAGAIN when the interface is down (the PHY may be powered off),
 * -EPERM for unprivileged writes, and -EOPNOTSUPP for unknown commands.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		/* PHY accesses are serialized by the PHY lock. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5945
5946/* Called with rtnl_lock */
5947static int
5948bnx2_change_mac_addr(struct net_device *dev, void *p)
5949{
5950 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005951 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005952
Michael Chan73eef4c2005-08-25 15:39:15 -07005953 if (!is_valid_ether_addr(addr->sa_data))
5954 return -EINVAL;
5955
Michael Chanb6016b72005-05-26 13:03:09 -07005956 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5957 if (netif_running(dev))
5958 bnx2_set_mac_addr(bp);
5959
5960 return 0;
5961}
5962
5963/* Called with rtnl_lock */
5964static int
5965bnx2_change_mtu(struct net_device *dev, int new_mtu)
5966{
Michael Chan972ec0d2006-01-23 16:12:43 -08005967 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005968
5969 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5970 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5971 return -EINVAL;
5972
5973 dev->mtu = new_mtu;
5974 if (netif_running(dev)) {
5975 bnx2_netif_stop(bp);
5976
5977 bnx2_init_nic(bp);
5978
5979 bnx2_netif_start(bp);
5980 }
5981 return 0;
5982}
5983
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler by hand with the device's
 * IRQ line masked, so netconsole/netpoll can make progress when normal
 * interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5995
Michael Chan253c8b72007-01-08 19:56:01 -08005996static void __devinit
5997bnx2_get_5709_media(struct bnx2 *bp)
5998{
5999 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6000 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6001 u32 strap;
6002
6003 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6004 return;
6005 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6006 bp->phy_flags |= PHY_SERDES_FLAG;
6007 return;
6008 }
6009
6010 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6011 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6012 else
6013 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6014
6015 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6016 switch (strap) {
6017 case 0x4:
6018 case 0x5:
6019 case 0x6:
6020 bp->phy_flags |= PHY_SERDES_FLAG;
6021 return;
6022 }
6023 } else {
6024 switch (strap) {
6025 case 0x1:
6026 case 0x2:
6027 case 0x4:
6028 bp->phy_flags |= PHY_SERDES_FLAG;
6029 return;
6030 }
6031 }
6032}
6033
Michael Chan883e5152007-05-03 13:25:11 -07006034static void __devinit
6035bnx2_get_pci_speed(struct bnx2 *bp)
6036{
6037 u32 reg;
6038
6039 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6040 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6041 u32 clkreg;
6042
6043 bp->flags |= PCIX_FLAG;
6044
6045 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6046
6047 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6048 switch (clkreg) {
6049 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6050 bp->bus_speed_mhz = 133;
6051 break;
6052
6053 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6054 bp->bus_speed_mhz = 100;
6055 break;
6056
6057 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6058 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6059 bp->bus_speed_mhz = 66;
6060 break;
6061
6062 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6063 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6064 bp->bus_speed_mhz = 50;
6065 break;
6066
6067 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6068 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6069 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6070 bp->bus_speed_mhz = 33;
6071 break;
6072 }
6073 }
6074 else {
6075 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6076 bp->bus_speed_mhz = 66;
6077 else
6078 bp->bus_speed_mhz = 33;
6079 }
6080
6081 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6082 bp->flags |= PCI_32BIT_FLAG;
6083
6084}
6085
Michael Chanb6016b72005-05-26 13:03:09 -07006086static int __devinit
6087bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6088{
6089 struct bnx2 *bp;
6090 unsigned long mem_len;
6091 int rc;
6092 u32 reg;
Michael Chan40453c82007-05-03 13:19:18 -07006093 u64 dma_mask, persist_dma_mask;
Michael Chanb6016b72005-05-26 13:03:09 -07006094
6095 SET_MODULE_OWNER(dev);
6096 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08006097 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07006098
6099 bp->flags = 0;
6100 bp->phy_flags = 0;
6101
6102 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6103 rc = pci_enable_device(pdev);
6104 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006105 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
Michael Chanb6016b72005-05-26 13:03:09 -07006106 goto err_out;
6107 }
6108
6109 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006110 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006111 "Cannot find PCI device base address, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006112 rc = -ENODEV;
6113 goto err_out_disable;
6114 }
6115
6116 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6117 if (rc) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006118 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006119 goto err_out_disable;
6120 }
6121
6122 pci_set_master(pdev);
6123
6124 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6125 if (bp->pm_cap == 0) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006126 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006127 "Cannot find power management capability, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006128 rc = -EIO;
6129 goto err_out_release;
6130 }
6131
Michael Chanb6016b72005-05-26 13:03:09 -07006132 bp->dev = dev;
6133 bp->pdev = pdev;
6134
6135 spin_lock_init(&bp->phy_lock);
Michael Chan1b8227c2007-05-03 13:24:05 -07006136 spin_lock_init(&bp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +00006137 INIT_WORK(&bp->reset_task, bnx2_reset_task);
Michael Chanb6016b72005-05-26 13:03:09 -07006138
6139 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
Michael Chan59b47d82006-11-19 14:10:45 -08006140 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
Michael Chanb6016b72005-05-26 13:03:09 -07006141 dev->mem_end = dev->mem_start + mem_len;
6142 dev->irq = pdev->irq;
6143
6144 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6145
6146 if (!bp->regview) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006147 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006148 rc = -ENOMEM;
6149 goto err_out_release;
6150 }
6151
6152 /* Configure byte swap and enable write to the reg_window registers.
6153 * Rely on CPU to do target byte swapping on big endian systems
6154 * The chip's target access swapping will not swap all accesses
6155 */
6156 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6157 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6158 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6159
Pavel Machek829ca9a2005-09-03 15:56:56 -07006160 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07006161
6162 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6163
Michael Chan883e5152007-05-03 13:25:11 -07006164 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6165 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6166 dev_err(&pdev->dev,
6167 "Cannot find PCIE capability, aborting.\n");
6168 rc = -EIO;
6169 goto err_out_unmap;
6170 }
6171 bp->flags |= PCIE_FLAG;
6172 } else {
Michael Chan59b47d82006-11-19 14:10:45 -08006173 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6174 if (bp->pcix_cap == 0) {
6175 dev_err(&pdev->dev,
6176 "Cannot find PCIX capability, aborting.\n");
6177 rc = -EIO;
6178 goto err_out_unmap;
6179 }
6180 }
6181
Michael Chan8e6a72c2007-05-03 13:24:48 -07006182 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6183 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6184 bp->flags |= MSI_CAP_FLAG;
6185 }
6186
Michael Chan40453c82007-05-03 13:19:18 -07006187 /* 5708 cannot support DMA addresses > 40-bit. */
6188 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6189 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6190 else
6191 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6192
6193 /* Configure DMA attributes. */
6194 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6195 dev->features |= NETIF_F_HIGHDMA;
6196 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6197 if (rc) {
6198 dev_err(&pdev->dev,
6199 "pci_set_consistent_dma_mask failed, aborting.\n");
6200 goto err_out_unmap;
6201 }
6202 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6203 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6204 goto err_out_unmap;
6205 }
6206
Michael Chan883e5152007-05-03 13:25:11 -07006207 if (!(bp->flags & PCIE_FLAG))
6208 bnx2_get_pci_speed(bp);
Michael Chanb6016b72005-05-26 13:03:09 -07006209
6210 /* 5706A0 may falsely detect SERR and PERR. */
6211 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6212 reg = REG_RD(bp, PCI_COMMAND);
6213 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6214 REG_WR(bp, PCI_COMMAND, reg);
6215 }
6216 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6217 !(bp->flags & PCIX_FLAG)) {
6218
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006219 dev_err(&pdev->dev,
Jeff Garzik2e8a5382006-06-27 10:47:51 -04006220 "5706 A1 can only be used in a PCIX bus, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006221 goto err_out_unmap;
6222 }
6223
6224 bnx2_init_nvram(bp);
6225
Michael Chane3648b32005-11-04 08:51:21 -08006226 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6227
6228 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
Michael Chan24cb2302007-01-25 15:49:56 -08006229 BNX2_SHM_HDR_SIGNATURE_SIG) {
6230 u32 off = PCI_FUNC(pdev->devfn) << 2;
6231
6232 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6233 } else
Michael Chane3648b32005-11-04 08:51:21 -08006234 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6235
Michael Chanb6016b72005-05-26 13:03:09 -07006236 /* Get the permanent MAC address. First we need to make sure the
6237 * firmware is actually running.
6238 */
Michael Chane3648b32005-11-04 08:51:21 -08006239 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07006240
6241 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6242 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
Jeff Garzik9b91cf92006-06-27 11:39:50 -04006243 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
Michael Chanb6016b72005-05-26 13:03:09 -07006244 rc = -ENODEV;
6245 goto err_out_unmap;
6246 }
6247
Michael Chane3648b32005-11-04 08:51:21 -08006248 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07006249
Michael Chane3648b32005-11-04 08:51:21 -08006250 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07006251 bp->mac_addr[0] = (u8) (reg >> 8);
6252 bp->mac_addr[1] = (u8) reg;
6253
Michael Chane3648b32005-11-04 08:51:21 -08006254 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07006255 bp->mac_addr[2] = (u8) (reg >> 24);
6256 bp->mac_addr[3] = (u8) (reg >> 16);
6257 bp->mac_addr[4] = (u8) (reg >> 8);
6258 bp->mac_addr[5] = (u8) reg;
6259
6260 bp->tx_ring_size = MAX_TX_DESC_CNT;
Michael Chan932f3772006-08-15 01:39:36 -07006261 bnx2_set_rx_ring_size(bp, 255);
Michael Chanb6016b72005-05-26 13:03:09 -07006262
6263 bp->rx_csum = 1;
6264
6265 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6266
6267 bp->tx_quick_cons_trip_int = 20;
6268 bp->tx_quick_cons_trip = 20;
6269 bp->tx_ticks_int = 80;
6270 bp->tx_ticks = 80;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04006271
Michael Chanb6016b72005-05-26 13:03:09 -07006272 bp->rx_quick_cons_trip_int = 6;
6273 bp->rx_quick_cons_trip = 6;
6274 bp->rx_ticks_int = 18;
6275 bp->rx_ticks = 18;
6276
6277 bp->stats_ticks = 1000000 & 0xffff00;
6278
6279 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07006280 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07006281
Michael Chan5b0c76a2005-11-04 08:45:49 -08006282 bp->phy_addr = 1;
6283
Michael Chanb6016b72005-05-26 13:03:09 -07006284 /* Disable WOL support if we are running on a SERDES chip. */
Michael Chan253c8b72007-01-08 19:56:01 -08006285 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6286 bnx2_get_5709_media(bp);
6287 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
Michael Chanb6016b72005-05-26 13:03:09 -07006288 bp->phy_flags |= PHY_SERDES_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006289
6290 if (bp->phy_flags & PHY_SERDES_FLAG) {
Michael Chanb6016b72005-05-26 13:03:09 -07006291 bp->flags |= NO_WOL_FLAG;
Michael Chanbac0dff2006-11-19 14:15:05 -08006292 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
Michael Chan5b0c76a2005-11-04 08:45:49 -08006293 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08006294 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08006295 BNX2_SHARED_HW_CFG_CONFIG);
6296 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6297 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6298 }
Michael Chan261dd5c2007-01-08 19:55:46 -08006299 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6300 CHIP_NUM(bp) == CHIP_NUM_5708)
6301 bp->phy_flags |= PHY_CRC_FIX_FLAG;
Michael Chanb659f442007-02-02 00:46:35 -08006302 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6303 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
Michael Chanb6016b72005-05-26 13:03:09 -07006304
Michael Chan16088272006-06-12 22:16:43 -07006305 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6306 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6307 (CHIP_ID(bp) == CHIP_ID_5708_B1))
Michael Chandda1e392006-01-23 16:08:14 -08006308 bp->flags |= NO_WOL_FLAG;
6309
Michael Chanb6016b72005-05-26 13:03:09 -07006310 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6311 bp->tx_quick_cons_trip_int =
6312 bp->tx_quick_cons_trip;
6313 bp->tx_ticks_int = bp->tx_ticks;
6314 bp->rx_quick_cons_trip_int =
6315 bp->rx_quick_cons_trip;
6316 bp->rx_ticks_int = bp->rx_ticks;
6317 bp->comp_prod_trip_int = bp->comp_prod_trip;
6318 bp->com_ticks_int = bp->com_ticks;
6319 bp->cmd_ticks_int = bp->cmd_ticks;
6320 }
6321
Michael Chanf9317a42006-09-29 17:06:23 -07006322 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6323 *
6324 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6325 * with byte enables disabled on the unused 32-bit word. This is legal
6326 * but causes problems on the AMD 8132 which will eventually stop
6327 * responding after a while.
6328 *
6329 * AMD believes this incompatibility is unique to the 5706, and
Michael Ellerman88187df2007-01-25 19:34:07 +11006330 * prefers to locally disable MSI rather than globally disabling it.
Michael Chanf9317a42006-09-29 17:06:23 -07006331 */
6332 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6333 struct pci_dev *amd_8132 = NULL;
6334
6335 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6336 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6337 amd_8132))) {
6338 u8 rev;
6339
6340 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6341 if (rev >= 0x10 && rev <= 0x13) {
6342 disable_msi = 1;
6343 pci_dev_put(amd_8132);
6344 break;
6345 }
6346 }
6347 }
6348
Michael Chanb6016b72005-05-26 13:03:09 -07006349 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6350 bp->req_line_speed = 0;
6351 if (bp->phy_flags & PHY_SERDES_FLAG) {
6352 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07006353
Michael Chane3648b32005-11-04 08:51:21 -08006354 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07006355 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6356 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6357 bp->autoneg = 0;
6358 bp->req_line_speed = bp->line_speed = SPEED_1000;
6359 bp->req_duplex = DUPLEX_FULL;
6360 }
Michael Chanb6016b72005-05-26 13:03:09 -07006361 }
6362 else {
6363 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6364 }
6365
6366 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6367
Michael Chancd339a02005-08-25 15:35:24 -07006368 init_timer(&bp->timer);
6369 bp->timer.expires = RUN_AT(bp->timer_interval);
6370 bp->timer.data = (unsigned long) bp;
6371 bp->timer.function = bnx2_timer;
6372
Michael Chanb6016b72005-05-26 13:03:09 -07006373 return 0;
6374
6375err_out_unmap:
6376 if (bp->regview) {
6377 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07006378 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07006379 }
6380
6381err_out_release:
6382 pci_release_regions(pdev);
6383
6384err_out_disable:
6385 pci_disable_device(pdev);
6386 pci_set_drvdata(pdev, NULL);
6387
6388err_out:
6389 return rc;
6390}
6391
Michael Chan883e5152007-05-03 13:25:11 -07006392static char * __devinit
6393bnx2_bus_string(struct bnx2 *bp, char *str)
6394{
6395 char *s = str;
6396
6397 if (bp->flags & PCIE_FLAG) {
6398 s += sprintf(s, "PCI Express");
6399 } else {
6400 s += sprintf(s, "PCI");
6401 if (bp->flags & PCIX_FLAG)
6402 s += sprintf(s, "-X");
6403 if (bp->flags & PCI_32BIT_FLAG)
6404 s += sprintf(s, " 32-bit");
6405 else
6406 s += sprintf(s, " 64-bit");
6407 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6408 }
6409 return str;
6410}
6411
/* PCI probe entry point: allocate the net_device, initialize the board,
 * wire up the netdev operations, and register with the network stack.
 *
 * @pdev: the PCI function being probed
 * @ent:  matching entry from bnx2_pci_tbl; driver_data indexes board_info[]
 *
 * Returns 0 on success or a negative errno; on any failure every resource
 * acquired here (and by bnx2_init_board) is released before returning.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	/* Print the driver banner once, on the first probed device only. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* Map BARs, reset the chip, read NVRAM config, etc.  On failure it
	 * has already cleaned up after itself; we only free the netdev.
	 */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	/* All netdev callbacks must be in place before register_netdev(). */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;	/* NAPI poll budget */

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from NVRAM by bnx2_init_board. */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* 5709 can checksum any protocol; older chips do IPv4 only. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
	else
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	/* TSO over IPv6 is only supported on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Unwind everything bnx2_init_board set up. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	/* e.g. "eth0: ... (B2) PCI-X 64-bit 133MHz found at mem ..., IRQ 16" */
	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',	/* silicon rev letter */
		((CHIP_ID(bp) & 0x0ff0) >> 4),		/* metal rev number */
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6504
/* PCI remove entry point: tear down one device in the reverse order of
 * bnx2_init_one.  The netdev must be unregistered (which closes it if it
 * is up) before its register mapping and the netdev itself are freed.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred reset/link work is still in flight. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6523
/* Power-management suspend hook: quiesce the device, tell the firmware
 * why we are going down (so it can arm or disarm wake-on-LAN), save PCI
 * config space, and drop to the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Finish any pending deferred work before stopping the device. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload message that matches our WoL policy. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	/* Save config space before the power state change clobbers it. */
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6550
/* Power-management resume hook: mirror of bnx2_suspend.  Restore PCI
 * config space and D0 power, then fully re-initialize the NIC (suspend
 * reset the chip and freed all rings, so a from-scratch init is needed).
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Interface was down at suspend time; nothing was torn down. */
	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6567
/* PCI driver descriptor tying the NX2 device IDs to the probe/remove
 * and power-management callbacks above.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6576
/* Module load: register the PCI driver; probing happens per-device via
 * bnx2_init_one.  Returns 0 or a negative errno from the PCI core.
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6581
/* Module unload: unregister the driver, which invokes bnx2_remove_one
 * for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6586
/* Hook the module entry/exit points into the kernel module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6589
6590
6591