1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
46 #include <net/checksum.h>
47 #include <linux/version.h>
48 #include <net/ip6_checksum.h>
49 #include <linux/workqueue.h>
50 #include <linux/crc32.h>
51 #include <linux/crc32c.h>
52 #include <linux/prefetch.h>
53 #include <linux/zlib.h>
56 #include "bnx2x_reg.h"
57 #include "bnx2x_fw_defs.h"
58 #include "bnx2x_hsi.h"
59 #include "bnx2x_link.h"
61 #include "bnx2x_init.h"
63 #define DRV_MODULE_VERSION "1.45.6"
64 #define DRV_MODULE_RELDATE "2008/06/23"
65 #define BNX2X_BC_VER 0x040200
67 /* Time in jiffies before concluding the transmitter is hung */
68 #define TX_TIMEOUT (5*HZ)
70 static char version[] __devinitdata =
71 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
72 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
74 MODULE_AUTHOR("Eliezer Tamir");
75 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76 MODULE_LICENSE("GPL");
77 MODULE_VERSION(DRV_MODULE_VERSION);
79 static int disable_tpa;
83 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
86 module_param(disable_tpa, int, 0);
87 module_param(use_inta, int, 0);
88 module_param(poll, int, 0);
89 module_param(debug, int, 0);
90 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
91 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
92 MODULE_PARM_DESC(poll, "use polling (for debug)");
93 MODULE_PARM_DESC(debug, "default debug msglevel");
96 module_param(use_multi, int, 0);
97 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
100 enum bnx2x_board_type {
106 /* indexed by board_type, above */
109 } board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
116 static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
126 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
128 /****************************************************************************
129 * General service functions
130 ****************************************************************************/
133 * locking is done by the MCP
135 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140 PCICFG_VENDOR_ID_OFFSET);
143 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150 PCICFG_VENDOR_ID_OFFSET);
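/* The two helpers above access chip registers indirectly through the
 * PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config space, and
 * restore the window to PCICFG_VENDOR_ID_OFFSET afterwards so regular
 * config cycles keep working.  They are used before the DMAE engine
 * (below) is ready.
 */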
155 static const u32 dmae_reg_go_c[] = {
156 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
162 /* copy command into DMAE command memory and set DMAE command go */
163 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
169 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
173 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
176 REG_WR(bp, dmae_reg_go_c[idx], 1);
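/* Writing 1 to the channel's GO register starts the DMAE engine on the
 * command block that was just copied into its command memory.
 */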
179 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
182 struct dmae_command *dmae = &bp->init_dmae;
183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
186 if (!bp->dmae_ready) {
187 u32 *data = bnx2x_sp(bp, wb_data[0]);
189 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr, len32);
191 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
195 mutex_lock(&bp->dmae_mutex);
197 memset(dmae, 0, sizeof(struct dmae_command));
199 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
203 DMAE_CMD_ENDIANITY_B_DW_SWAP |
205 DMAE_CMD_ENDIANITY_DW_SWAP |
207 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
209 dmae->src_addr_lo = U64_LO(dma_addr);
210 dmae->src_addr_hi = U64_HI(dma_addr);
211 dmae->dst_addr_lo = dst_addr >> 2;
212 dmae->dst_addr_hi = 0;
214 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
216 dmae->comp_val = DMAE_COMP_VAL;
218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
231 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
235 while (*wb_comp != DMAE_COMP_VAL) {
236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
239 BNX2X_ERR("dmae timeout!\n");
243 /* adjust delay for emulation/FPGA */
244 if (CHIP_REV_IS_SLOW(bp))
250 mutex_unlock(&bp->dmae_mutex);
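/* Minimal usage sketch (roughly what the REG_WR_DMAE() wrapper does):
 * stage the data in the slowpath scratch buffer and let the DMAE engine
 * copy it to a GRC address, e.g. for a two-dword wide-bus register:
 *
 *	u32 *wb = bnx2x_sp(bp, wb_data[0]);
 *
 *	wb[0] = val_hi;
 *	wb[1] = val_lo;
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, 2);
 */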
253 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
255 struct dmae_command *dmae = &bp->init_dmae;
256 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
259 if (!bp->dmae_ready) {
260 u32 *data = bnx2x_sp(bp, wb_data[0]);
263 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
264 " using indirect\n", src_addr, len32);
265 for (i = 0; i < len32; i++)
266 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
270 mutex_lock(&bp->dmae_mutex);
272 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
273 memset(dmae, 0, sizeof(struct dmae_command));
275 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
276 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
277 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
279 DMAE_CMD_ENDIANITY_B_DW_SWAP |
281 DMAE_CMD_ENDIANITY_DW_SWAP |
283 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
284 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
285 dmae->src_addr_lo = src_addr >> 2;
286 dmae->src_addr_hi = 0;
287 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
288 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
290 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
292 dmae->comp_val = DMAE_COMP_VAL;
294 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
295 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
296 "dst_addr [%x:%08x (%08x)]\n"
297 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
298 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
299 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
300 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
304 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
308 while (*wb_comp != DMAE_COMP_VAL) {
311 BNX2X_ERR("dmae timeout!\n");
315 /* adjust delay for emulation/FPGA */
316 if (CHIP_REV_IS_SLOW(bp))
321 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
322 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
323 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
325 mutex_unlock(&bp->dmae_mutex);
328 /* used only for slowpath so not inlined */
329 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
333 wb_write[0] = val_hi;
334 wb_write[1] = val_lo;
335 REG_WR_DMAE(bp, reg, wb_write, 2);
339 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
343 REG_RD_DMAE(bp, reg, wb_data, 2);
345 return HILO_U64(wb_data[0], wb_data[1]);
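/* Each STORM processor (X/T/C/U) keeps a list of firmware asserts in its
 * internal memory; bnx2x_mc_assert() below walks the four lists and
 * prints every entry whose first word is not the invalid-assert opcode.
 */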
349 static int bnx2x_mc_assert(struct bnx2x *bp)
353 u32 row0, row1, row2, row3;
356 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
357 XSTORM_ASSERT_LIST_INDEX_OFFSET);
359 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
361 /* print the asserts */
362 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
364 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i));
366 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
368 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
370 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
373 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
374 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
375 " 0x%08x 0x%08x 0x%08x\n",
376 i, row3, row2, row1, row0);
384 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
385 TSTORM_ASSERT_LIST_INDEX_OFFSET);
387 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
389 /* print the asserts */
390 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
392 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i));
394 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
396 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
398 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
401 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
402 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
403 " 0x%08x 0x%08x 0x%08x\n",
404 i, row3, row2, row1, row0);
412 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
413 CSTORM_ASSERT_LIST_INDEX_OFFSET);
415 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
417 /* print the asserts */
418 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
420 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i));
422 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
424 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
426 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
429 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
430 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
431 " 0x%08x 0x%08x 0x%08x\n",
432 i, row3, row2, row1, row0);
440 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
441 USTORM_ASSERT_LIST_INDEX_OFFSET);
443 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
445 /* print the asserts */
446 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
448 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i));
450 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 4);
452 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 8);
454 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 12);
457 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
458 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
459 " 0x%08x 0x%08x 0x%08x\n",
460 i, row3, row2, row1, row0);
470 static void bnx2x_fw_dump(struct bnx2x *bp)
476 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
477 mark = ((mark + 0x3) & ~0x3);
478 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
480 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
481 for (word = 0; word < 8; word++)
482 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
485 printk(KERN_CONT "%s", (char *)data);
487 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
488 for (word = 0; word < 8; word++)
489 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492 printk(KERN_CONT "%s", (char *)data);
494 printk("\n" KERN_ERR PFX "end of fw dump\n");
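/* bnx2x_panic_dump() below freezes statistics handling and then dumps
 * the driver's view of every queue: producer/consumer indices, the TX/RX
 * BD and CQE rings around the failure point, and the default status
 * block indices.
 */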
497 static void bnx2x_panic_dump(struct bnx2x *bp)
502 bp->stats_state = STATS_STATE_DISABLED;
503 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
505 BNX2X_ERR("begin crash dump -----------------\n");
507 for_each_queue(bp, i) {
508 struct bnx2x_fastpath *fp = &bp->fp[i];
509 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
511 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
512 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
513 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
514 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
515 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
516 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
517 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
518 fp->rx_bd_prod, fp->rx_bd_cons,
519 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
520 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
521 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
522 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
523 " *sb_u_idx(%x) bd data(%x,%x)\n",
524 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
525 fp->status_blk->c_status_block.status_block_index,
527 fp->status_blk->u_status_block.status_block_index,
528 hw_prods->packets_prod, hw_prods->bds_prod);
530 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
531 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
532 for (j = start; j < end; j++) {
533 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
535 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
536 sw_bd->skb, sw_bd->first_bd);
539 start = TX_BD(fp->tx_bd_cons - 10);
540 end = TX_BD(fp->tx_bd_cons + 254);
541 for (j = start; j < end; j++) {
542 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
544 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
545 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
548 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
549 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
550 for (j = start; j < end; j++) {
551 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
552 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
554 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
555 j, rx_bd[1], rx_bd[0], sw_bd->skb);
558 start = RX_SGE(fp->rx_sge_prod);
559 end = RX_SGE(fp->last_max_sge);
560 for (j = start; j < end; j++) {
561 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
562 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
564 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
565 j, rx_sge[1], rx_sge[0], sw_page->page);
568 start = RCQ_BD(fp->rx_comp_cons - 10);
569 end = RCQ_BD(fp->rx_comp_cons + 503);
570 for (j = start; j < end; j++) {
571 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
573 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
574 j, cqe[0], cqe[1], cqe[2], cqe[3]);
578 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
579 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
580 " spq_prod_idx(%u)\n",
581 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
582 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
586 BNX2X_ERR("end crash dump -----------------\n");
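/* Interrupt configuration: each port has an HC config register that
 * selects single-ISR (INTA) vs MSI-X operation and gates the attention
 * bit.  On E1H chips the leading/trailing edge registers are programmed
 * as well (including NIG attention for this function).
 */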
589 static void bnx2x_int_enable(struct bnx2x *bp)
591 int port = BP_PORT(bp);
592 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
593 u32 val = REG_RD(bp, addr);
594 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
597 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
601 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
602 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
603 HC_CONFIG_0_REG_INT_LINE_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
606 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
607 val, port, addr, msix);
609 REG_WR(bp, addr, val);
611 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
614 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
615 val, port, addr, msix);
617 REG_WR(bp, addr, val);
619 if (CHIP_IS_E1H(bp)) {
620 /* init leading/trailing edge */
622 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
624 /* enable nig attention */
629 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
630 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
634 static void bnx2x_int_disable(struct bnx2x *bp)
636 int port = BP_PORT(bp);
637 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
638 u32 val = REG_RD(bp, addr);
640 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
641 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642 HC_CONFIG_0_REG_INT_LINE_EN_0 |
643 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
645 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
648 REG_WR(bp, addr, val);
649 if (REG_RD(bp, addr) != val)
650 BNX2X_ERR("BUG! proper val not read from IGU!\n");
653 static void bnx2x_int_disable_sync(struct bnx2x *bp)
655 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
658 /* disable interrupt handling */
659 atomic_inc(&bp->intr_sem);
660 /* prevent the HW from sending interrupts */
661 bnx2x_int_disable(bp);
663 /* make sure all ISRs are done */
665 for_each_queue(bp, i)
666 synchronize_irq(bp->msix_table[i].vector);
668 /* one more for the Slow Path IRQ */
669 synchronize_irq(bp->msix_table[i].vector);
671 synchronize_irq(bp->pdev->irq);
673 /* make sure sp_task is not running */
674 cancel_work_sync(&bp->sp_task);
680 * General service functions
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684 u8 storm, u16 index, u8 op, u8 update)
686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack;
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
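/* bnx2x_ack_sb() above posts an igu_ack_register word (SB id, storm id,
 * new index and interrupt mode) to the per-port HC command register; the
 * same write both updates the status block index and enables or disables
 * the interrupt for that status block.
 */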
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
704 struct host_status_block *fpsb = fp->status_blk;
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
733 * fast path service functions
736 /* free skb in the packet ring at pos idx
737 * return idx of last bd freed
739 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
742 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
743 struct eth_tx_bd *tx_bd;
744 struct sk_buff *skb = tx_buf->skb;
745 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
748 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
752 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
753 tx_bd = &fp->tx_desc_ring[bd_idx];
754 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
755 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
757 nbd = le16_to_cpu(tx_bd->nbd) - 1;
758 new_cons = nbd + tx_buf->first_bd;
759 #ifdef BNX2X_STOP_ON_ERROR
760 if (nbd > (MAX_SKB_FRAGS + 2)) {
761 BNX2X_ERR("BAD nbd!\n");
766 /* Skip a parse bd and the TSO split header bd
767 since they have no mapping */
769 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
771 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
772 ETH_TX_BD_FLAGS_TCP_CSUM |
773 ETH_TX_BD_FLAGS_SW_LSO)) {
775 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
776 tx_bd = &fp->tx_desc_ring[bd_idx];
777 /* is this a TSO split header bd? */
778 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
780 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
787 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
788 tx_bd = &fp->tx_desc_ring[bd_idx];
789 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
790 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
792 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
798 tx_buf->first_bd = 0;
804 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
810 barrier(); /* Tell compiler that prod and cons can change */
811 prod = fp->tx_bd_prod;
812 cons = fp->tx_bd_cons;
814 /* NUM_TX_RINGS = number of "next-page" entries
815 It will be used as a threshold */
816 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
818 #ifdef BNX2X_STOP_ON_ERROR
820 WARN_ON(used > fp->bp->tx_ring_size);
821 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
824 return (s16)(fp->bp->tx_ring_size) - used;
827 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
829 struct bnx2x *bp = fp->bp;
830 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
833 #ifdef BNX2X_STOP_ON_ERROR
834 if (unlikely(bp->panic))
838 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
839 sw_cons = fp->tx_pkt_cons;
841 while (sw_cons != hw_cons) {
844 pkt_cons = TX_BD(sw_cons);
846 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
848 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
849 hw_cons, sw_cons, pkt_cons);
851 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
853 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
856 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
864 fp->tx_pkt_cons = sw_cons;
865 fp->tx_bd_cons = bd_cons;
867 /* Need to make the tx_cons update visible to start_xmit()
868 * before checking for netif_queue_stopped(). Without the
869 * memory barrier, there is a small possibility that start_xmit()
870 * will miss it and cause the queue to be stopped forever.
874 /* TBD need a thresh? */
875 if (unlikely(netif_queue_stopped(bp->dev))) {
877 netif_tx_lock(bp->dev);
879 if (netif_queue_stopped(bp->dev) &&
880 (bp->state == BNX2X_STATE_OPEN) &&
881 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
882 netif_wake_queue(bp->dev);
884 netif_tx_unlock(bp->dev);
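/* Slowpath (ramrod) completions arrive on the fastpath RCQ.
 * bnx2x_sp_event() below matches the completed command against the
 * current fp->state / bp->state and advances the state machine
 * accordingly.
 */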
889 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
890 union eth_rx_cqe *rr_cqe)
892 struct bnx2x *bp = fp->bp;
893 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
894 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
897 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
898 FP_IDX(fp), cid, command, bp->state,
899 rr_cqe->ramrod_cqe.ramrod_type);
904 switch (command | fp->state) {
905 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
906 BNX2X_FP_STATE_OPENING):
907 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
909 fp->state = BNX2X_FP_STATE_OPEN;
912 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
913 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
915 fp->state = BNX2X_FP_STATE_HALTED;
919 BNX2X_ERR("unexpected MC reply (%d) "
920 "fp->state is %x\n", command, fp->state);
923 mb(); /* force bnx2x_wait_ramrod() to see the change */
927 switch (command | bp->state) {
928 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
929 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
930 bp->state = BNX2X_STATE_OPEN;
933 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
934 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
935 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
936 fp->state = BNX2X_FP_STATE_HALTED;
939 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
940 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
941 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
945 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
946 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
947 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
948 bp->set_mac_pending = 0;
951 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
952 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
956 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
960 mb(); /* force bnx2x_wait_ramrod() to see the change */
963 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
964 struct bnx2x_fastpath *fp, u16 index)
966 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
967 struct page *page = sw_buf->page;
968 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
970 /* Skip "next page" elements */
974 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
975 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
976 __free_pages(page, PAGES_PER_SGE_SHIFT);
983 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
984 struct bnx2x_fastpath *fp, int last)
988 for (i = 0; i < last; i++)
989 bnx2x_free_rx_sge(bp, fp, i);
992 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
995 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
996 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1000 if (unlikely(page == NULL))
1003 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1004 PCI_DMA_FROMDEVICE);
1005 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1006 __free_pages(page, PAGES_PER_SGE_SHIFT);
1010 sw_buf->page = page;
1011 pci_unmap_addr_set(sw_buf, mapping, mapping);
1013 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1014 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1019 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1020 struct bnx2x_fastpath *fp, u16 index)
1022 struct sk_buff *skb;
1023 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1024 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1027 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1028 if (unlikely(skb == NULL))
1031 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1032 PCI_DMA_FROMDEVICE);
1033 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1039 pci_unmap_addr_set(rx_buf, mapping, mapping);
1041 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1042 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1047 /* note that we are not allocating a new skb,
1048 * we are just moving one from cons to prod
1049 * we are not creating a new mapping,
1050 * so there is no need to check for dma_mapping_error().
1052 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1053 struct sk_buff *skb, u16 cons, u16 prod)
1055 struct bnx2x *bp = fp->bp;
1056 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1057 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1058 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1059 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1061 pci_dma_sync_single_for_device(bp->pdev,
1062 pci_unmap_addr(cons_rx_buf, mapping),
1063 bp->rx_offset + RX_COPY_THRESH,
1064 PCI_DMA_FROMDEVICE);
1066 prod_rx_buf->skb = cons_rx_buf->skb;
1067 pci_unmap_addr_set(prod_rx_buf, mapping,
1068 pci_unmap_addr(cons_rx_buf, mapping));
1069 *prod_bd = *cons_bd;
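/* The SGE ring used for TPA is tracked with a bitmask (fp->sge_mask),
 * one bit per SGE.  Completions clear the bits for the pages the FW
 * consumed, and the SGE producer is only advanced over 64-bit mask
 * elements that are fully consumed (see bnx2x_update_sge_prod()).
 */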
1072 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1075 u16 last_max = fp->last_max_sge;
1077 if (SUB_S16(idx, last_max) > 0)
1078 fp->last_max_sge = idx;
1081 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1085 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1086 int idx = RX_SGE_CNT * i - 1;
1088 for (j = 0; j < 2; j++) {
1089 SGE_MASK_CLEAR_BIT(fp, idx);
1095 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1096 struct eth_fast_path_rx_cqe *fp_cqe)
1098 struct bnx2x *bp = fp->bp;
1099 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1100 le16_to_cpu(fp_cqe->len_on_bd)) >>
1102 u16 last_max, last_elem, first_elem;
1109 /* First mark all used pages */
1110 for (i = 0; i < sge_len; i++)
1111 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1113 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1114 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1116 /* Here we assume that the last SGE index is the biggest */
1117 prefetch((void *)(fp->sge_mask));
1118 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1120 last_max = RX_SGE(fp->last_max_sge);
1121 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1122 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1124 /* If ring is not full */
1125 if (last_elem + 1 != first_elem)
1128 /* Now update the prod */
1129 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1130 if (likely(fp->sge_mask[i]))
1133 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1134 delta += RX_SGE_MASK_ELEM_SZ;
1138 fp->rx_sge_prod += delta;
1139 /* clear page-end entries */
1140 bnx2x_clear_sge_mask_next_elems(fp);
1143 DP(NETIF_MSG_RX_STATUS,
1144 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1145 fp->last_max_sge, fp->rx_sge_prod);
1148 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1150 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1151 memset(fp->sge_mask, 0xff,
1152 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1154 /* Clear the two last indices in the page to 1:
1155 these are the indices that correspond to the "next" element,
1156 hence will never be indicated and should be removed from
1157 the calculations. */
1158 bnx2x_clear_sge_mask_next_elems(fp);
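/* TPA (HW LRO) flow: on TPA_START an empty skb from the per-queue pool
 * is mapped into the producer BD while the partially filled skb is
 * parked in the pool; on TPA_END bnx2x_fill_frag_skb() attaches the SGE
 * pages as fragments and the aggregated skb is passed to the stack.
 */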
1161 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1162 struct sk_buff *skb, u16 cons, u16 prod)
1164 struct bnx2x *bp = fp->bp;
1165 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1166 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1167 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1170 /* move empty skb from pool to prod and map it */
1171 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1172 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1173 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1174 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1176 /* move partial skb from cons to pool (don't unmap yet) */
1177 fp->tpa_pool[queue] = *cons_rx_buf;
1179 /* mark bin state as start - print error if current state != stop */
1180 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1181 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1183 fp->tpa_state[queue] = BNX2X_TPA_START;
1185 /* point prod_bd to new skb */
1186 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1187 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1189 #ifdef BNX2X_STOP_ON_ERROR
1190 fp->tpa_queue_used |= (1 << queue);
1191 #ifdef __powerpc64__
1192 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1194 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1196 fp->tpa_queue_used);
1200 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1201 struct sk_buff *skb,
1202 struct eth_fast_path_rx_cqe *fp_cqe,
1205 struct sw_rx_page *rx_pg, old_rx_pg;
1207 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1208 u32 i, frag_len, frag_size, pages;
1212 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1213 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1215 /* This is needed in order to enable forwarding support */
1217 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1218 max(frag_size, (u32)len_on_bd));
1220 #ifdef BNX2X_STOP_ON_ERROR
1221 if (pages > 8*PAGES_PER_SGE) {
1222 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1224 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1225 fp_cqe->pkt_len, len_on_bd);
1231 /* Run through the SGL and compose the fragmented skb */
1232 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1233 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1235 /* FW gives the indices of the SGE as if the ring is an array
1236 (meaning that "next" element will consume 2 indices) */
1237 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1238 rx_pg = &fp->rx_page_ring[sge_idx];
1242 /* If we fail to allocate a substitute page, we simply stop
1243 where we are and drop the whole packet */
1244 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1245 if (unlikely(err)) {
1246 bp->eth_stats.rx_skb_alloc_failed++;
1250 /* Unmap the page as we are going to pass it to the stack */
1251 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1252 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1254 /* Add one frag and update the appropriate fields in the skb */
1255 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1257 skb->data_len += frag_len;
1258 skb->truesize += frag_len;
1259 skb->len += frag_len;
1261 frag_size -= frag_len;
1267 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1268 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1271 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1272 struct sk_buff *skb = rx_buf->skb;
1274 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1276 /* Unmap skb in the pool anyway, as we are going to change
1277 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */
1279 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1280 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1282 if (likely(new_skb)) {
1283 /* fix ip xsum and give it to the stack */
1284 /* (no need to map the new skb) */
1287 prefetch(((char *)(skb)) + 128);
1289 #ifdef BNX2X_STOP_ON_ERROR
1290 if (pad + len > bp->rx_buf_size) {
1291 BNX2X_ERR("skb_put is about to fail... "
1292 "pad %d len %d rx_buf_size %d\n",
1293 pad, len, bp->rx_buf_size);
1299 skb_reserve(skb, pad);
1302 skb->protocol = eth_type_trans(skb, bp->dev);
1303 skb->ip_summed = CHECKSUM_UNNECESSARY;
1308 iph = (struct iphdr *)skb->data;
1310 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1313 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1314 &cqe->fast_path_cqe, cqe_idx)) {
1316 if ((bp->vlgrp != NULL) &&
1317 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1318 PARSING_FLAGS_VLAN))
1319 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1320 le16_to_cpu(cqe->fast_path_cqe.
1324 netif_receive_skb(skb);
1326 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1327 " - dropping packet!\n");
1331 bp->dev->last_rx = jiffies;
1333 /* put new skb in bin */
1334 fp->tpa_pool[queue].skb = new_skb;
1337 /* else drop the packet and keep the buffer in the bin */
1338 DP(NETIF_MSG_RX_STATUS,
1339 "Failed to allocate new skb - dropping packet!\n");
1340 bp->eth_stats.rx_skb_alloc_failed++;
1343 fp->tpa_state[queue] = BNX2X_TPA_STOP;
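/* The three RX producers (BD, CQE and SGE) are written to TSTORM
 * internal memory as a single tstorm_eth_rx_producers structure so the
 * FW sees a consistent update.
 */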
1346 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1347 struct bnx2x_fastpath *fp,
1348 u16 bd_prod, u16 rx_comp_prod,
1351 struct tstorm_eth_rx_producers rx_prods = {0};
1354 /* Update producers */
1355 rx_prods.bd_prod = bd_prod;
1356 rx_prods.cqe_prod = rx_comp_prod;
1357 rx_prods.sge_prod = rx_sge_prod;
1359 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1360 REG_WR(bp, BAR_TSTRORM_INTMEM +
1361 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1362 ((u32 *)&rx_prods)[i]);
1364 DP(NETIF_MSG_RX_STATUS,
1365 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1366 bd_prod, rx_comp_prod, rx_sge_prod);
1369 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1371 struct bnx2x *bp = fp->bp;
1372 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1373 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1376 #ifdef BNX2X_STOP_ON_ERROR
1377 if (unlikely(bp->panic))
1381 /* CQ "next element" is of the size of the regular element,
1382 that's why it's ok here */
1383 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1384 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1387 bd_cons = fp->rx_bd_cons;
1388 bd_prod = fp->rx_bd_prod;
1389 bd_prod_fw = bd_prod;
1390 sw_comp_cons = fp->rx_comp_cons;
1391 sw_comp_prod = fp->rx_comp_prod;
1393 /* Memory barrier necessary as speculative reads of the rx
1394 * buffer can be ahead of the index in the status block
1398 DP(NETIF_MSG_RX_STATUS,
1399 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1400 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1402 while (sw_comp_cons != hw_comp_cons) {
1403 struct sw_rx_bd *rx_buf = NULL;
1404 struct sk_buff *skb;
1405 union eth_rx_cqe *cqe;
1409 comp_ring_cons = RCQ_BD(sw_comp_cons);
1410 bd_prod = RX_BD(bd_prod);
1411 bd_cons = RX_BD(bd_cons);
1413 cqe = &fp->rx_comp_ring[comp_ring_cons];
1414 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1416 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1417 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1418 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1419 cqe->fast_path_cqe.rss_hash_result,
1420 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1421 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1423 /* is this a slowpath msg? */
1424 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1425 bnx2x_sp_event(fp, cqe);
1428 /* this is an rx packet */
1430 rx_buf = &fp->rx_buf_ring[bd_cons];
1432 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1433 pad = cqe->fast_path_cqe.placement_offset;
1435 /* If CQE is marked both TPA_START and TPA_END
1436 it is a non-TPA CQE */
1437 if ((!fp->disable_tpa) &&
1438 (TPA_TYPE(cqe_fp_flags) !=
1439 (TPA_TYPE_START | TPA_TYPE_END))) {
1440 u16 queue = cqe->fast_path_cqe.queue_index;
1442 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1443 DP(NETIF_MSG_RX_STATUS,
1444 "calling tpa_start on queue %d\n",
1447 bnx2x_tpa_start(fp, queue, skb,
1452 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1453 DP(NETIF_MSG_RX_STATUS,
1454 "calling tpa_stop on queue %d\n",
1457 if (!BNX2X_RX_SUM_FIX(cqe))
1458 BNX2X_ERR("STOP on non-TCP "
1461 /* This is the size of the linear data on this skb */
1463 len = le16_to_cpu(cqe->fast_path_cqe.
1465 bnx2x_tpa_stop(bp, fp, queue, pad,
1466 len, cqe, comp_ring_cons);
1467 #ifdef BNX2X_STOP_ON_ERROR
1472 bnx2x_update_sge_prod(fp,
1473 &cqe->fast_path_cqe);
1478 pci_dma_sync_single_for_device(bp->pdev,
1479 pci_unmap_addr(rx_buf, mapping),
1480 pad + RX_COPY_THRESH,
1481 PCI_DMA_FROMDEVICE);
1483 prefetch(((char *)(skb)) + 128);
1485 /* is this an error packet? */
1486 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1487 DP(NETIF_MSG_RX_ERR,
1488 "ERROR flags %x rx packet %u\n",
1489 cqe_fp_flags, sw_comp_cons);
1490 bp->eth_stats.rx_err_discard_pkt++;
1494 /* Since we don't have a jumbo ring
1495 * copy small packets if mtu > 1500
1497 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1498 (len <= RX_COPY_THRESH)) {
1499 struct sk_buff *new_skb;
1501 new_skb = netdev_alloc_skb(bp->dev,
1503 if (new_skb == NULL) {
1504 DP(NETIF_MSG_RX_ERR,
1505 "ERROR packet dropped "
1506 "because of alloc failure\n");
1507 bp->eth_stats.rx_skb_alloc_failed++;
1512 skb_copy_from_linear_data_offset(skb, pad,
1513 new_skb->data + pad, len);
1514 skb_reserve(new_skb, pad);
1515 skb_put(new_skb, len);
1517 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1521 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1522 pci_unmap_single(bp->pdev,
1523 pci_unmap_addr(rx_buf, mapping),
1524 bp->rx_buf_use_size,
1525 PCI_DMA_FROMDEVICE);
1526 skb_reserve(skb, pad);
1530 DP(NETIF_MSG_RX_ERR,
1531 "ERROR packet dropped because "
1532 "of alloc failure\n");
1533 bp->eth_stats.rx_skb_alloc_failed++;
1535 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1539 skb->protocol = eth_type_trans(skb, bp->dev);
1541 skb->ip_summed = CHECKSUM_NONE;
1543 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1544 skb->ip_summed = CHECKSUM_UNNECESSARY;
1546 bp->eth_stats.hw_csum_err++;
1551 if ((bp->vlgrp != NULL) &&
1552 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1553 PARSING_FLAGS_VLAN))
1554 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1555 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1558 netif_receive_skb(skb);
1560 bp->dev->last_rx = jiffies;
1565 bd_cons = NEXT_RX_IDX(bd_cons);
1566 bd_prod = NEXT_RX_IDX(bd_prod);
1567 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1570 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1571 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1573 if (rx_pkt == budget)
1577 fp->rx_bd_cons = bd_cons;
1578 fp->rx_bd_prod = bd_prod_fw;
1579 fp->rx_comp_cons = sw_comp_cons;
1580 fp->rx_comp_prod = sw_comp_prod;
1582 /* Update producers */
1583 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1585 mmiowb(); /* keep prod updates ordered */
1587 fp->rx_pkt += rx_pkt;
1593 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1595 struct bnx2x_fastpath *fp = fp_cookie;
1596 struct bnx2x *bp = fp->bp;
1597 struct net_device *dev = bp->dev;
1598 int index = FP_IDX(fp);
1600 /* Return here if interrupt is disabled */
1601 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1602 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1606 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1607 index, FP_SB_ID(fp));
1608 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1610 #ifdef BNX2X_STOP_ON_ERROR
1611 if (unlikely(bp->panic))
1615 prefetch(fp->rx_cons_sb);
1616 prefetch(fp->tx_cons_sb);
1617 prefetch(&fp->status_blk->c_status_block.status_block_index);
1618 prefetch(&fp->status_blk->u_status_block.status_block_index);
1620 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1625 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1627 struct net_device *dev = dev_instance;
1628 struct bnx2x *bp = netdev_priv(dev);
1629 u16 status = bnx2x_ack_int(bp);
1632 /* Return here if interrupt is shared and it's not for us */
1633 if (unlikely(status == 0)) {
1634 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1637 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1639 /* Return here if interrupt is disabled */
1640 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1641 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1645 #ifdef BNX2X_STOP_ON_ERROR
1646 if (unlikely(bp->panic))
1650 mask = 0x2 << bp->fp[0].sb_id;
1651 if (status & mask) {
1652 struct bnx2x_fastpath *fp = &bp->fp[0];
1654 prefetch(fp->rx_cons_sb);
1655 prefetch(fp->tx_cons_sb);
1656 prefetch(&fp->status_blk->c_status_block.status_block_index);
1657 prefetch(&fp->status_blk->u_status_block.status_block_index);
1659 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1665 if (unlikely(status & 0x1)) {
1666 schedule_work(&bp->sp_task);
1674 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1680 /* end of fast path */
1682 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1687 * General service functions
1690 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1693 u32 resource_bit = (1 << resource);
1694 int func = BP_FUNC(bp);
1695 u32 hw_lock_control_reg;
1698 /* Validating that the resource is within range */
1699 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1701 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1702 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1707 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1709 hw_lock_control_reg =
1710 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1713 /* Validating that the resource is not already taken */
1714 lock_status = REG_RD(bp, hw_lock_control_reg);
1715 if (lock_status & resource_bit) {
1716 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1717 lock_status, resource_bit);
1721 /* Try for 1 second every 5ms */
1722 for (cnt = 0; cnt < 200; cnt++) {
1723 /* Try to acquire the lock */
1724 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1725 lock_status = REG_RD(bp, hw_lock_control_reg);
1726 if (lock_status & resource_bit)
1731 DP(NETIF_MSG_HW, "Timeout\n");
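/* The HW resource locks are bit-per-resource registers: writing the
 * resource bit to the control register + 4 requests the lock, reading
 * the control register back shows whether it was granted, and writing
 * the bit to the control register itself releases it.
 */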
1735 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1738 u32 resource_bit = (1 << resource);
1739 int func = BP_FUNC(bp);
1740 u32 hw_lock_control_reg;
1742 /* Validating that the resource is within range */
1743 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1745 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1746 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1753 hw_lock_control_reg =
1754 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1757 /* Validating that the resource is currently taken */
1758 lock_status = REG_RD(bp, hw_lock_control_reg);
1759 if (!(lock_status & resource_bit)) {
1760 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1761 lock_status, resource_bit);
1765 REG_WR(bp, hw_lock_control_reg, resource_bit);
1769 /* HW Lock for shared dual port PHYs */
1770 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1772 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1774 mutex_lock(&bp->port.phy_mutex);
1776 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1777 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1778 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1781 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1783 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1785 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1787 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1789 mutex_unlock(&bp->port.phy_mutex);
1792 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1794 /* The GPIO should be swapped if swap register is set and active */
1795 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1796 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1797 int gpio_shift = gpio_num +
1798 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1799 u32 gpio_mask = (1 << gpio_shift);
1802 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1803 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1807 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1808 /* read GPIO and mask except the float bits */
1809 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1812 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1813 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1814 gpio_num, gpio_shift);
1815 /* clear FLOAT and set CLR */
1816 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1817 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1820 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1821 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1822 gpio_num, gpio_shift);
1823 /* clear FLOAT and set SET */
1824 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1825 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1828 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1829 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1830 gpio_num, gpio_shift);
1832 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1839 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1840 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1845 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1847 u32 spio_mask = (1 << spio_num);
1850 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1851 (spio_num > MISC_REGISTERS_SPIO_7)) {
1852 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1856 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1857 /* read SPIO and mask except the float bits */
1858 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1861 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1862 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1863 /* clear FLOAT and set CLR */
1864 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1865 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1868 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1869 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1870 /* clear FLOAT and set SET */
1871 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1872 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1875 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1876 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1878 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1885 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1886 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1891 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1893 switch (bp->link_vars.ieee_fc) {
1894 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1895 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1898 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1899 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1902 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1903 bp->port.advertising |= ADVERTISED_Asym_Pause;
1906 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1912 static void bnx2x_link_report(struct bnx2x *bp)
1914 if (bp->link_vars.link_up) {
1915 if (bp->state == BNX2X_STATE_OPEN)
1916 netif_carrier_on(bp->dev);
1917 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1919 printk("%d Mbps ", bp->link_vars.line_speed);
1921 if (bp->link_vars.duplex == DUPLEX_FULL)
1922 printk("full duplex");
1924 printk("half duplex");
1926 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1927 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1928 printk(", receive ");
1929 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1930 printk("& transmit ");
1932 printk(", transmit ");
1934 printk("flow control ON");
1938 } else { /* link_down */
1939 netif_carrier_off(bp->dev);
1940 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1944 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1946 if (!BP_NOMCP(bp)) {
1949 /* Initialize link parameters structure variables */
1950 /* It is recommended to turn off RX FC for jumbo frames
1951 for better performance */
1953 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1954 else if (bp->dev->mtu > 5000)
1955 bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1957 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1959 bnx2x_acquire_phy_lock(bp);
1960 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1961 bnx2x_release_phy_lock(bp);
1963 if (bp->link_vars.link_up)
1964 bnx2x_link_report(bp);
1966 bnx2x_calc_fc_adv(bp);
1970 BNX2X_ERR("Bootcode is missing - not initializing link\n");
1974 static void bnx2x_link_set(struct bnx2x *bp)
1976 if (!BP_NOMCP(bp)) {
1977 bnx2x_acquire_phy_lock(bp);
1978 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1979 bnx2x_release_phy_lock(bp);
1981 bnx2x_calc_fc_adv(bp);
1983 BNX2X_ERR("Bootcode is missing - not setting link\n");
1986 static void bnx2x__link_reset(struct bnx2x *bp)
1988 if (!BP_NOMCP(bp)) {
1989 bnx2x_acquire_phy_lock(bp);
1990 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1991 bnx2x_release_phy_lock(bp);
1993 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1996 static u8 bnx2x_link_test(struct bnx2x *bp)
2000 bnx2x_acquire_phy_lock(bp);
2001 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2002 bnx2x_release_phy_lock(bp);
2007 /* Calculates the sum of vn_min_rates.
2008 It's needed for further normalizing of the min_rates.
2013 0 - if all the min_rates are 0.
2014 In the latter case the fairness algorithm should be deactivated.
2015 If not all min_rates are zero then those that are zeroes will be set to 1.
2018 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2020 int i, port = BP_PORT(bp);
2024 for (i = 0; i < E1HVN_MAX; i++) {
2026 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2027 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2028 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2029 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2030 /* If min rate is zero - set it to 1 */
2032 vn_min_rate = DEF_MIN_RATE;
2036 wsum += vn_min_rate;
2040 /* ... only if all min rates are zeros - disable FAIRNESS */
2047 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2050 struct cmng_struct_per_port *m_cmng_port)
2052 u32 r_param = port_rate / 8;
2053 int port = BP_PORT(bp);
2056 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2058 /* Enable minmax only if we are in e1hmf mode */
2060 u32 fair_periodic_timeout_usec;
2063 /* Enable rate shaping and fairness */
2064 m_cmng_port->flags.cmng_vn_enable = 1;
2065 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2066 m_cmng_port->flags.rate_shaping_enable = 1;
2069 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2070 " fairness will be disabled\n");
2072 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2073 m_cmng_port->rs_vars.rs_periodic_timeout =
2074 RS_PERIODIC_TIMEOUT_USEC / 4;
2076 /* this is the threshold below which no timer arming will occur;
2077 the 1.25 coefficient makes the threshold a little bigger
2078 than the real time, to compensate for timer inaccuracy */
2079 m_cmng_port->rs_vars.rs_threshold =
2080 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2082 /* resolution of fairness timer */
2083 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2084 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2085 t_fair = T_FAIR_COEF / port_rate;
2087 /* this is the threshold below which we won't arm
2088 the timer anymore */
2089 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2091 /* we multiply by 1e3/8 to get bytes/msec.
2092 We don't want the credits to exceed a credit
2093 of T_FAIR*FAIR_MEM (the algorithm resolution) */
2094 m_cmng_port->fair_vars.upper_bound =
2095 r_param * t_fair * FAIR_MEM;
2096 /* since each tick is 4 usec */
2097 m_cmng_port->fair_vars.fairness_timeout =
2098 fair_periodic_timeout_usec / 4;
2101 /* Disable rate shaping and fairness */
2102 m_cmng_port->flags.cmng_vn_enable = 0;
2103 m_cmng_port->flags.fairness_enable = 0;
2104 m_cmng_port->flags.rate_shaping_enable = 0;
2107 "Single function mode minmax will be disabled\n");
2110 /* Store it to internal memory */
2111 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2112 REG_WR(bp, BAR_XSTRORM_INTMEM +
2113 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2114 ((u32 *)(m_cmng_port))[i]);
2117 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2118 u32 wsum, u16 port_rate,
2119 struct cmng_struct_per_port *m_cmng_port)
2121 struct rate_shaping_vars_per_vn m_rs_vn;
2122 struct fairness_vars_per_vn m_fair_vn;
2123 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2124 u16 vn_min_rate, vn_max_rate;
2127 /* If function is hidden - set min and max to zeroes */
2128 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2133 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2134 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2135 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2136 if current min rate is zero - set it to 1.
2137 This is a requirement of the algorithm. */
2138 if ((vn_min_rate == 0) && wsum)
2139 vn_min_rate = DEF_MIN_RATE;
2140 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2141 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2144 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2145 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2147 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2148 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2150 /* global vn counter - maximal Mbps for this vn */
2151 m_rs_vn.vn_counter.rate = vn_max_rate;
2153 /* quota - number of bytes transmitted in this period */
2154 m_rs_vn.vn_counter.quota =
2155 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2157 #ifdef BNX2X_PER_PROT_QOS
2158 /* per protocol counter */
2159 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2160 /* maximal Mbps for this protocol */
2161 m_rs_vn.protocol_counters[protocol].rate =
2162 protocol_max_rate[protocol];
2163 /* the quota in each timer period -
2164 number of bytes transmitted in this period */
2165 m_rs_vn.protocol_counters[protocol].quota =
2166 (u32)(rs_periodic_timeout_usec *
2168 protocol_counters[protocol].rate/8));
2173 /* credit for each period of the fairness algorithm:
2174 number of bytes in T_FAIR (the vn share the port rate).
2175 wsum should not be larger than 10000, thus
2176 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2177 m_fair_vn.vn_credit_delta =
2178 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2179 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2180 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2181 m_fair_vn.vn_credit_delta);
2184 #ifdef BNX2X_PER_PROT_QOS
2186 u32 protocolWeightSum = 0;
2188 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2189 protocolWeightSum +=
2190 drvInit.protocol_min_rate[protocol];
2191 /* per protocol counter -
2192 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2193 if (protocolWeightSum > 0) {
2195 protocol < NUM_OF_PROTOCOLS; protocol++)
2196 /* credit for each period of the
2197 fairness algorithm - number of bytes in
2198 T_FAIR (the protocol share the vn rate) */
2199 m_fair_vn.protocol_credit_delta[protocol] =
2200 (u32)((vn_min_rate / 8) * t_fair *
2201 protocol_min_rate / protocolWeightSum);
2206 /* Store it to internal memory */
2207 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2208 REG_WR(bp, BAR_XSTRORM_INTMEM +
2209 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2210 ((u32 *)(&m_rs_vn))[i]);
2212 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2213 REG_WR(bp, BAR_XSTRORM_INTMEM +
2214 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2215 ((u32 *)(&m_fair_vn))[i]);
2218 /* This function is called upon link interrupt */
2219 static void bnx2x_link_attn(struct bnx2x *bp)
2223 /* Make sure that we are synced with the current statistics */
2224 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2226 bnx2x_acquire_phy_lock(bp);
2227 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2228 bnx2x_release_phy_lock(bp);
2230 if (bp->link_vars.link_up) {
2232 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2233 struct host_port_stats *pstats;
2235 pstats = bnx2x_sp(bp, port_stats);
2236 /* reset old bmac stats */
2237 memset(&(pstats->mac_stx[0]), 0,
2238 sizeof(struct mac_stx));
2240 if ((bp->state == BNX2X_STATE_OPEN) ||
2241 (bp->state == BNX2X_STATE_DISABLED))
2242 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2245 /* indicate link status */
2246 bnx2x_link_report(bp);
2251 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2252 if (vn == BP_E1HVN(bp))
2255 func = ((vn << 1) | BP_PORT(bp));
2257 /* Set the attention towards other drivers on the same port */
2259 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2260 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
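/* raising the per-function LINK_SYNC general attention notifies each
   other function on this port so its driver re-reads the link state
   (see the BNX2X_PMF_LINK_ASSERT handling in bnx2x_attn_int_deasserted3) */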
2264 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2265 struct cmng_struct_per_port m_cmng_port;
2267 int port = BP_PORT(bp);
2269 /* Init RATE SHAPING and FAIRNESS contexts */
2270 wsum = bnx2x_calc_vn_wsum(bp);
2271 bnx2x_init_port_minmax(bp, (int)wsum,
2272 bp->link_vars.line_speed,
2275 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2276 bnx2x_init_vn_minmax(bp, 2*vn + port,
2277 wsum, bp->link_vars.line_speed,
2282 static void bnx2x__link_status_update(struct bnx2x *bp)
2284 if (bp->state != BNX2X_STATE_OPEN)
2287 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2289 if (bp->link_vars.link_up)
2290 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2292 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2294 /* indicate link status */
2295 bnx2x_link_report(bp);
2298 static void bnx2x_pmf_update(struct bnx2x *bp)
2300 int port = BP_PORT(bp);
2304 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2306 /* enable nig attention */
2307 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2308 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2309 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2311 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2319 * General service functions
2322 /* the slow path queue is unusual in that its completions arrive on the fastpath ring */
2323 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2324 u32 data_hi, u32 data_lo, int common)
2326 int func = BP_FUNC(bp);
2328 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2329 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2330 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2331 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2332 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2334 #ifdef BNX2X_STOP_ON_ERROR
2335 if (unlikely(bp->panic))
2339 spin_lock_bh(&bp->spq_lock);
2341 if (!bp->spq_left) {
2342 BNX2X_ERR("BUG! SPQ ring full!\n");
2343 spin_unlock_bh(&bp->spq_lock);
2348 /* the CID needs the port number encoded in it */
2349 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2350 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2352 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2354 bp->spq_prod_bd->hdr.type |=
2355 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2357 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2358 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
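/* advance the producer: wrap back to the start of the ring once the last
   BD has been used, then publish the new producer index to XSTORM internal
   memory so the firmware picks up the new element */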
2362 if (bp->spq_prod_bd == bp->spq_last_bd) {
2363 bp->spq_prod_bd = bp->spq;
2364 bp->spq_prod_idx = 0;
2365 DP(NETIF_MSG_TIMER, "end of spq\n");
2372 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2375 spin_unlock_bh(&bp->spq_lock);
2379 /* acquire split MCP access lock register */
2380 static int bnx2x_acquire_alr(struct bnx2x *bp)
2387 for (j = 0; j < i*10; j++) {
2389 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2390 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2391 if (val & (1L << 31))
2396 if (!(val & (1L << 31))) {
2397 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2404 /* release split MCP access lock register */
2405 static void bnx2x_release_alr(struct bnx2x *bp)
2409 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2412 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2414 struct host_def_status_block *def_sb = bp->def_status_blk;
2417 barrier(); /* status block is written to by the chip */
2418 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2419 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2422 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2423 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2426 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2427 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2430 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2431 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2434 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2435 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
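/* a changed index means that storm has produced new slow path events;
   bnx2x_sp_task() uses the returned status to tell whether anything is
   actually pending */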
2442 * slow path service functions
2445 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2447 int port = BP_PORT(bp);
2448 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2449 COMMAND_REG_ATTN_BITS_SET);
2450 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2451 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2452 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2453 NIG_REG_MASK_INTERRUPT_PORT0;
2456 if (bp->attn_state & asserted)
2457 BNX2X_ERR("IGU ERROR\n");
2459 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2460 aeu_mask = REG_RD(bp, aeu_addr);
2462 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2463 aeu_mask, asserted);
2464 aeu_mask &= ~(asserted & 0xff);
2465 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2467 REG_WR(bp, aeu_addr, aeu_mask);
2468 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2470 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2471 bp->attn_state |= asserted;
2472 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2474 if (asserted & ATTN_HARD_WIRED_MASK) {
2475 if (asserted & ATTN_NIG_FOR_FUNC) {
2477 /* save nig interrupt mask */
2478 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2479 REG_WR(bp, nig_int_mask_addr, 0);
2481 bnx2x_link_attn(bp);
2483 /* handle unicore attn? */
2485 if (asserted & ATTN_SW_TIMER_4_FUNC)
2486 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2488 if (asserted & GPIO_2_FUNC)
2489 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2491 if (asserted & GPIO_3_FUNC)
2492 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2494 if (asserted & GPIO_4_FUNC)
2495 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2498 if (asserted & ATTN_GENERAL_ATTN_1) {
2499 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2500 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2502 if (asserted & ATTN_GENERAL_ATTN_2) {
2503 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2504 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2506 if (asserted & ATTN_GENERAL_ATTN_3) {
2507 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2508 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2511 if (asserted & ATTN_GENERAL_ATTN_4) {
2512 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2513 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2515 if (asserted & ATTN_GENERAL_ATTN_5) {
2516 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2517 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2519 if (asserted & ATTN_GENERAL_ATTN_6) {
2520 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2521 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2525 } /* if hardwired */
2527 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2529 REG_WR(bp, hc_addr, asserted);
2531 /* now set back the mask */
2532 if (asserted & ATTN_NIG_FOR_FUNC)
2533 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2536 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2538 int port = BP_PORT(bp);
2542 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2543 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2545 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2547 val = REG_RD(bp, reg_offset);
2548 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2549 REG_WR(bp, reg_offset, val);
2551 BNX2X_ERR("SPIO5 hw attention\n");
2553 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2554 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2555 /* Fan failure attention */
2557 /* The PHY reset is controlled by GPIO 1 */
2558 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2559 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2560 /* Low power mode is controlled by GPIO 2 */
2561 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2562 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2563 /* mark the failure */
2564 bp->link_params.ext_phy_config &=
2565 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2566 bp->link_params.ext_phy_config |=
2567 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2569 dev_info.port_hw_config[port].
2570 external_phy_config,
2571 bp->link_params.ext_phy_config);
2572 /* log the failure */
2573 printk(KERN_ERR PFX "Fan Failure on Network"
2574 " Controller %s has caused the driver to"
2575 " shutdown the card to prevent permanent"
2576 " damage. Please contact Dell Support for"
2577 " assistance\n", bp->dev->name);
2585 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2587 val = REG_RD(bp, reg_offset);
2588 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2589 REG_WR(bp, reg_offset, val);
2591 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2592 (attn & HW_INTERRUT_ASSERT_SET_0));
2597 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2601 if (attn & BNX2X_DOORQ_ASSERT) {
2603 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2604 BNX2X_ERR("DB hw attention 0x%x\n", val);
2605 /* DORQ discard attention */
2607 BNX2X_ERR("FATAL error from DORQ\n");
2610 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2612 int port = BP_PORT(bp);
2615 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2616 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2618 val = REG_RD(bp, reg_offset);
2619 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2620 REG_WR(bp, reg_offset, val);
2622 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2623 (attn & HW_INTERRUT_ASSERT_SET_1));
2628 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2632 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2634 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2635 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2636 /* CFC error attention */
2638 BNX2X_ERR("FATAL error from CFC\n");
2641 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2643 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2644 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2645 /* RQ_USDMDP_FIFO_OVERFLOW */
2647 BNX2X_ERR("FATAL error from PXP\n");
2650 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2652 int port = BP_PORT(bp);
2655 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2656 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2658 val = REG_RD(bp, reg_offset);
2659 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2660 REG_WR(bp, reg_offset, val);
2662 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2663 (attn & HW_INTERRUT_ASSERT_SET_2));
2668 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2672 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2674 if (attn & BNX2X_PMF_LINK_ASSERT) {
2675 int func = BP_FUNC(bp);
2677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2678 bnx2x__link_status_update(bp);
2679 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2681 bnx2x_pmf_update(bp);
2683 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2685 BNX2X_ERR("MC assert!\n");
2686 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2688 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2689 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2692 } else if (attn & BNX2X_MCP_ASSERT) {
2694 BNX2X_ERR("MCP assert!\n");
2695 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2699 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2702 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2703 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2704 if (attn & BNX2X_GRC_TIMEOUT) {
2705 val = CHIP_IS_E1H(bp) ?
2706 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2707 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2709 if (attn & BNX2X_GRC_RSV) {
2710 val = CHIP_IS_E1H(bp) ?
2711 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2712 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2714 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2718 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2720 struct attn_route attn;
2721 struct attn_route group_mask;
2722 int port = BP_PORT(bp);
2728 /* need to take HW lock because MCP or other port might also
2729 try to handle this event */
2730 bnx2x_acquire_alr(bp);
2732 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2733 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2734 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2735 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2736 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2737 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2739 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2740 if (deasserted & (1 << index)) {
2741 group_mask = bp->attn_group[index];
2743 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2744 index, group_mask.sig[0], group_mask.sig[1],
2745 group_mask.sig[2], group_mask.sig[3]);
2747 bnx2x_attn_int_deasserted3(bp,
2748 attn.sig[3] & group_mask.sig[3]);
2749 bnx2x_attn_int_deasserted1(bp,
2750 attn.sig[1] & group_mask.sig[1]);
2751 bnx2x_attn_int_deasserted2(bp,
2752 attn.sig[2] & group_mask.sig[2]);
2753 bnx2x_attn_int_deasserted0(bp,
2754 attn.sig[0] & group_mask.sig[0]);
2756 if ((attn.sig[0] & group_mask.sig[0] &
2757 HW_PRTY_ASSERT_SET_0) ||
2758 (attn.sig[1] & group_mask.sig[1] &
2759 HW_PRTY_ASSERT_SET_1) ||
2760 (attn.sig[2] & group_mask.sig[2] &
2761 HW_PRTY_ASSERT_SET_2))
2762 BNX2X_ERR("FATAL HW block parity attention\n");
2766 bnx2x_release_alr(bp);
2768 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2771 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2773 REG_WR(bp, reg_addr, val);
2775 if (~bp->attn_state & deasserted)
2776 BNX2X_ERR("IGU ERROR\n");
2778 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2779 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2781 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2782 aeu_mask = REG_RD(bp, reg_addr);
2784 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2785 aeu_mask, deasserted);
2786 aeu_mask |= (deasserted & 0xff);
2787 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2789 REG_WR(bp, reg_addr, aeu_mask);
2790 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2792 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2793 bp->attn_state &= ~deasserted;
2794 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2797 static void bnx2x_attn_int(struct bnx2x *bp)
2799 /* read local copy of bits */
2800 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2801 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2802 u32 attn_state = bp->attn_state;
2804 /* look for changed bits */
2805 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2806 u32 deasserted = ~attn_bits & attn_ack & attn_state;
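/* a bit is newly asserted when it is set in attn_bits but not yet acked
   and not yet recorded in attn_state; it is deasserted when it has cleared
   in attn_bits while still acked and still recorded in attn_state */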
2809 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2810 attn_bits, attn_ack, asserted, deasserted);
2812 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2813 BNX2X_ERR("BAD attention state\n");
2815 /* handle bits that were raised */
2817 bnx2x_attn_int_asserted(bp, asserted);
2820 bnx2x_attn_int_deasserted(bp, deasserted);
2823 static void bnx2x_sp_task(struct work_struct *work)
2825 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2829 /* Return here if interrupt is disabled */
2830 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2831 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2835 status = bnx2x_update_dsb_idx(bp);
2836 /* if (status == 0) */
2837 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2839 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2845 /* CStorm events: query_stats, port delete ramrod */
2847 bp->stats_pending = 0;
2849 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2851 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2853 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2855 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2857 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2862 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2864 struct net_device *dev = dev_instance;
2865 struct bnx2x *bp = netdev_priv(dev);
2867 /* Return here if interrupt is disabled */
2868 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2869 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2873 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2875 #ifdef BNX2X_STOP_ON_ERROR
2876 if (unlikely(bp->panic))
2880 schedule_work(&bp->sp_task);
2885 /* end of slow path */
2889 /****************************************************************************
2891 ****************************************************************************/
2893 /* sum[hi:lo] += add[hi:lo] */
2894 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2897 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2900 /* difference = minuend - subtrahend */
2901 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2903 if (m_lo < s_lo) { \
2905 d_hi = m_hi - s_hi; \
2907 /* we can 'loan' 1 */ \
2909 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2911 /* m_hi <= s_hi */ \
2916 /* m_lo >= s_lo */ \
2917 if (m_hi < s_hi) { \
2921 /* m_hi >= s_hi */ \
2922 d_hi = m_hi - s_hi; \
2923 d_lo = m_lo - s_lo; \
2928 #define UPDATE_STAT64(s, t) \
2930 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2931 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2932 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2933 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2934 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2935 pstats->mac_stx[1].t##_lo, diff.lo); \
2938 #define UPDATE_STAT64_NIG(s, t) \
2940 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2941 diff.lo, new->s##_lo, old->s##_lo); \
2942 ADD_64(estats->t##_hi, diff.hi, \
2943 estats->t##_lo, diff.lo); \
2946 /* sum[hi:lo] += add */
2947 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2950 s_hi += (s_lo < a) ? 1 : 0; \
2953 #define UPDATE_EXTEND_STAT(s) \
2955 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2956 pstats->mac_stx[1].s##_lo, \
2960 #define UPDATE_EXTEND_TSTAT(s, t) \
2962 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2963 old_tclient->s = le32_to_cpu(tclient->s); \
2964 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2967 #define UPDATE_EXTEND_XSTAT(s, t) \
2969 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2970 old_xclient->s = le32_to_cpu(xclient->s); \
2971 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
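/* all of these counters are kept as hi/lo pairs of 32-bit words; the
   ADD_64/DIFF_64/ADD_EXTEND_64 macros above do the 64-bit arithmetic with
   an explicit carry/borrow on the low word.  For example, adding
   0x00000000:00000020 to 0x00000001:fffffff0 wraps the low word to
   0x00000010 (now smaller than a_lo), so a carry of 1 goes into the high
   word and the sum is 0x00000002:00000010.  UPDATE_STAT64 keeps the latest
   raw MAC snapshot in mac_stx[0] and accumulates running totals in
   mac_stx[1]. */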
2975 * General service functions
2978 static inline long bnx2x_hilo(u32 *hiref)
2980 u32 lo = *(hiref + 1);
2981 #if (BITS_PER_LONG == 64)
2984 return HILO_U64(hi, lo);
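/* the statistics words are laid out hi-word first, so hiref points at the
   high 32 bits and hiref + 1 at the low 32 bits; on 32-bit kernels only
   the low word can be returned in a long */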
2991 * Init service functions
2994 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2996 if (!bp->stats_pending) {
2997 struct eth_query_ramrod_data ramrod_data = {0};
3000 ramrod_data.drv_counter = bp->stats_counter++;
3001 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3002 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3004 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3005 ((u32 *)&ramrod_data)[1],
3006 ((u32 *)&ramrod_data)[0], 0);
3008 /* the stats ramrod has its own slot on the spq */
3010 bp->stats_pending = 1;
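/* the statistics ramrod asks the firmware storms to report their per-client
   and per-port counters; stats_pending stays set until the completion
   arrives on the slow path (it is cleared in bnx2x_sp_task) */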
3015 static void bnx2x_stats_init(struct bnx2x *bp)
3017 int port = BP_PORT(bp);
3019 bp->executer_idx = 0;
3020 bp->stats_counter = 0;
3024 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3026 bp->port.port_stx = 0;
3027 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3029 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3030 bp->port.old_nig_stats.brb_discard =
3031 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032 bp->port.old_nig_stats.brb_truncate =
3033 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3034 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3035 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3036 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3037 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3039 /* function stats */
3040 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3041 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3042 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3043 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3045 bp->stats_state = STATS_STATE_DISABLED;
3046 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3047 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3050 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3052 struct dmae_command *dmae = &bp->stats_dmae;
3053 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3055 *stats_comp = DMAE_COMP_VAL;
3058 if (bp->executer_idx) {
3059 int loader_idx = PMF_DMAE_C(bp);
3061 memset(dmae, 0, sizeof(struct dmae_command));
3063 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3064 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3065 DMAE_CMD_DST_RESET |
3067 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3069 DMAE_CMD_ENDIANITY_DW_SWAP |
3071 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3073 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3074 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3075 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3076 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3077 sizeof(struct dmae_command) *
3078 (loader_idx + 1)) >> 2;
3079 dmae->dst_addr_hi = 0;
3080 dmae->len = sizeof(struct dmae_command) >> 2;
3083 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3084 dmae->comp_addr_hi = 0;
3088 bnx2x_post_dmae(bp, dmae, loader_idx);
3090 } else if (bp->func_stx) {
3092 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
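/* when several DMAE commands were queued in the sp area (executer_idx != 0)
   a 'loader' command is posted: its destination is the on-chip DMAE command
   memory and its completion write hits the GO register, so the queued
   commands are executed by the hardware back to back; otherwise the single
   function-stats command is posted directly */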
3096 static int bnx2x_stats_comp(struct bnx2x *bp)
3098 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3102 while (*stats_comp != DMAE_COMP_VAL) {
3104 BNX2X_ERR("timeout waiting for stats finished\n");
3114 * Statistics service functions
3117 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3119 struct dmae_command *dmae;
3121 int loader_idx = PMF_DMAE_C(bp);
3122 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3126 BNX2X_ERR("BUG!\n");
3130 bp->executer_idx = 0;
3132 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3134 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3136 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3138 DMAE_CMD_ENDIANITY_DW_SWAP |
3140 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3141 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3143 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3144 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3145 dmae->src_addr_lo = bp->port.port_stx >> 2;
3146 dmae->src_addr_hi = 0;
3147 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3148 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3149 dmae->len = DMAE_LEN32_RD_MAX;
3150 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3151 dmae->comp_addr_hi = 0;
3154 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3155 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3156 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3157 dmae->src_addr_hi = 0;
3158 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3159 DMAE_LEN32_RD_MAX * 4);
3160 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3161 DMAE_LEN32_RD_MAX * 4);
3162 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3163 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3164 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3165 dmae->comp_val = DMAE_COMP_VAL;
3168 bnx2x_hw_stats_post(bp);
3169 bnx2x_stats_comp(bp);
3172 static void bnx2x_port_stats_init(struct bnx2x *bp)
3174 struct dmae_command *dmae;
3175 int port = BP_PORT(bp);
3176 int vn = BP_E1HVN(bp);
3178 int loader_idx = PMF_DMAE_C(bp);
3180 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3183 if (!bp->link_vars.link_up || !bp->port.pmf) {
3184 BNX2X_ERR("BUG!\n");
3188 bp->executer_idx = 0;
3191 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3192 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3193 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3195 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3197 DMAE_CMD_ENDIANITY_DW_SWAP |
3199 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3200 (vn << DMAE_CMD_E1HVN_SHIFT));
3202 if (bp->port.port_stx) {
3204 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3205 dmae->opcode = opcode;
3206 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3207 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3208 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3209 dmae->dst_addr_hi = 0;
3210 dmae->len = sizeof(struct host_port_stats) >> 2;
3211 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3212 dmae->comp_addr_hi = 0;
3218 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219 dmae->opcode = opcode;
3220 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3221 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3222 dmae->dst_addr_lo = bp->func_stx >> 2;
3223 dmae->dst_addr_hi = 0;
3224 dmae->len = sizeof(struct host_func_stats) >> 2;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226 dmae->comp_addr_hi = 0;
3231 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3232 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3233 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3235 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3237 DMAE_CMD_ENDIANITY_DW_SWAP |
3239 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240 (vn << DMAE_CMD_E1HVN_SHIFT));
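/* from here on the direction flips: MAC and NIG counters are DMAE'd from
   GRC registers into host memory (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI),
   whereas the commands above pushed host statistics out to shared memory */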
3242 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3244 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3245 NIG_REG_INGRESS_BMAC0_MEM);
3247 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3248 BIGMAC_REGISTER_TX_STAT_GTBYT */
3249 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250 dmae->opcode = opcode;
3251 dmae->src_addr_lo = (mac_addr +
3252 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3253 dmae->src_addr_hi = 0;
3254 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3255 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3256 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3257 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3258 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3259 dmae->comp_addr_hi = 0;
3262 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3263 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3264 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265 dmae->opcode = opcode;
3266 dmae->src_addr_lo = (mac_addr +
3267 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3268 dmae->src_addr_hi = 0;
3269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3270 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3271 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3272 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3273 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3274 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3275 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3276 dmae->comp_addr_hi = 0;
3279 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3281 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3283 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3284 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285 dmae->opcode = opcode;
3286 dmae->src_addr_lo = (mac_addr +
3287 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3288 dmae->src_addr_hi = 0;
3289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3290 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3291 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3292 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293 dmae->comp_addr_hi = 0;
3296 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3297 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3298 dmae->opcode = opcode;
3299 dmae->src_addr_lo = (mac_addr +
3300 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3301 dmae->src_addr_hi = 0;
3302 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3303 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3304 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3305 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3307 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3308 dmae->comp_addr_hi = 0;
3311 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = opcode;
3314 dmae->src_addr_lo = (mac_addr +
3315 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3316 dmae->src_addr_hi = 0;
3317 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3318 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3319 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3320 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3321 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3322 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323 dmae->comp_addr_hi = 0;
3328 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329 dmae->opcode = opcode;
3330 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3331 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3332 dmae->src_addr_hi = 0;
3333 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3334 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3335 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3340 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341 dmae->opcode = opcode;
3342 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3343 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3344 dmae->src_addr_hi = 0;
3345 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3346 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3347 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3348 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3349 dmae->len = (2*sizeof(u32)) >> 2;
3350 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351 dmae->comp_addr_hi = 0;
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3356 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3357 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3359 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3361 DMAE_CMD_ENDIANITY_DW_SWAP |
3363 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3364 (vn << DMAE_CMD_E1HVN_SHIFT));
3365 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3366 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3367 dmae->src_addr_hi = 0;
3368 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3369 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3370 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3371 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3372 dmae->len = (2*sizeof(u32)) >> 2;
3373 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3374 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3375 dmae->comp_val = DMAE_COMP_VAL;
3380 static void bnx2x_func_stats_init(struct bnx2x *bp)
3382 struct dmae_command *dmae = &bp->stats_dmae;
3383 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3386 if (!bp->func_stx) {
3387 BNX2X_ERR("BUG!\n");
3391 bp->executer_idx = 0;
3392 memset(dmae, 0, sizeof(struct dmae_command));
3394 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3395 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3396 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3398 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3400 DMAE_CMD_ENDIANITY_DW_SWAP |
3402 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3403 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3404 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3405 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3406 dmae->dst_addr_lo = bp->func_stx >> 2;
3407 dmae->dst_addr_hi = 0;
3408 dmae->len = sizeof(struct host_func_stats) >> 2;
3409 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3410 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3411 dmae->comp_val = DMAE_COMP_VAL;
3416 static void bnx2x_stats_start(struct bnx2x *bp)
3419 bnx2x_port_stats_init(bp);
3421 else if (bp->func_stx)
3422 bnx2x_func_stats_init(bp);
3424 bnx2x_hw_stats_post(bp);
3425 bnx2x_storm_stats_post(bp);
3428 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3430 bnx2x_stats_comp(bp);
3431 bnx2x_stats_pmf_update(bp);
3432 bnx2x_stats_start(bp);
3435 static void bnx2x_stats_restart(struct bnx2x *bp)
3437 bnx2x_stats_comp(bp);
3438 bnx2x_stats_start(bp);
3441 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3443 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3444 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3445 struct regpair diff;
3447 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3448 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3449 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3450 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3451 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3452 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3453 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3455 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3457 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3458 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3459 UPDATE_STAT64(tx_stat_gt127,
3460 tx_stat_etherstatspkts65octetsto127octets);
3461 UPDATE_STAT64(tx_stat_gt255,
3462 tx_stat_etherstatspkts128octetsto255octets);
3463 UPDATE_STAT64(tx_stat_gt511,
3464 tx_stat_etherstatspkts256octetsto511octets);
3465 UPDATE_STAT64(tx_stat_gt1023,
3466 tx_stat_etherstatspkts512octetsto1023octets);
3467 UPDATE_STAT64(tx_stat_gt1518,
3468 tx_stat_etherstatspkts1024octetsto1522octets);
3469 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3470 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3471 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3472 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3473 UPDATE_STAT64(tx_stat_gterr,
3474 tx_stat_dot3statsinternalmactransmiterrors);
3475 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3478 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3480 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3481 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3483 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3484 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3485 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3486 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3487 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3488 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3489 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3490 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3491 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3492 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3493 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3494 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3495 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3496 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3497 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3498 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3499 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3500 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3501 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3506 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3516 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3518 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3519 struct nig_stats *old = &(bp->port.old_nig_stats);
3520 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3521 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3522 struct regpair diff;
3524 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3525 bnx2x_bmac_stats_update(bp);
3527 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3528 bnx2x_emac_stats_update(bp);
3530 else { /* unreached */
3531 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3535 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3536 new->brb_discard - old->brb_discard);
3537 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538 new->brb_truncate - old->brb_truncate);
3540 UPDATE_STAT64_NIG(egress_mac_pkt0,
3541 etherstatspkts1024octetsto1522octets);
3542 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3544 memcpy(old, new, sizeof(struct nig_stats));
3546 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3547 sizeof(struct mac_stx));
3548 estats->brb_drop_hi = pstats->brb_drop_hi;
3549 estats->brb_drop_lo = pstats->brb_drop_lo;
3551 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3556 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3558 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3559 int cl_id = BP_CL_ID(bp);
3560 struct tstorm_per_port_stats *tport =
3561 &stats->tstorm_common.port_statistics;
3562 struct tstorm_per_client_stats *tclient =
3563 &stats->tstorm_common.client_statistics[cl_id];
3564 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3565 struct xstorm_per_client_stats *xclient =
3566 &stats->xstorm_common.client_statistics[cl_id];
3567 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3568 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3569 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3572 /* are storm stats valid? */
3573 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3574 bp->stats_counter) {
3575 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3576 " tstorm counter (%d) != stats_counter (%d)\n",
3577 tclient->stats_counter, bp->stats_counter);
3580 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3581 bp->stats_counter) {
3582 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3583 " xstorm counter (%d) != stats_counter (%d)\n",
3584 xclient->stats_counter, bp->stats_counter);
3588 fstats->total_bytes_received_hi =
3589 fstats->valid_bytes_received_hi =
3590 le32_to_cpu(tclient->total_rcv_bytes.hi);
3591 fstats->total_bytes_received_lo =
3592 fstats->valid_bytes_received_lo =
3593 le32_to_cpu(tclient->total_rcv_bytes.lo);
3595 estats->error_bytes_received_hi =
3596 le32_to_cpu(tclient->rcv_error_bytes.hi);
3597 estats->error_bytes_received_lo =
3598 le32_to_cpu(tclient->rcv_error_bytes.lo);
3599 ADD_64(estats->error_bytes_received_hi,
3600 estats->rx_stat_ifhcinbadoctets_hi,
3601 estats->error_bytes_received_lo,
3602 estats->rx_stat_ifhcinbadoctets_lo);
3604 ADD_64(fstats->total_bytes_received_hi,
3605 estats->error_bytes_received_hi,
3606 fstats->total_bytes_received_lo,
3607 estats->error_bytes_received_lo);
3609 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3610 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3611 total_multicast_packets_received);
3612 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3613 total_broadcast_packets_received);
3615 fstats->total_bytes_transmitted_hi =
3616 le32_to_cpu(xclient->total_sent_bytes.hi);
3617 fstats->total_bytes_transmitted_lo =
3618 le32_to_cpu(xclient->total_sent_bytes.lo);
3620 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3621 total_unicast_packets_transmitted);
3622 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3623 total_multicast_packets_transmitted);
3624 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3625 total_broadcast_packets_transmitted);
3627 memcpy(estats, &(fstats->total_bytes_received_hi),
3628 sizeof(struct host_func_stats) - 2*sizeof(u32));
3630 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3631 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3632 estats->brb_truncate_discard =
3633 le32_to_cpu(tport->brb_truncate_discard);
3634 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3636 old_tclient->rcv_unicast_bytes.hi =
3637 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3638 old_tclient->rcv_unicast_bytes.lo =
3639 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3640 old_tclient->rcv_broadcast_bytes.hi =
3641 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3642 old_tclient->rcv_broadcast_bytes.lo =
3643 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3644 old_tclient->rcv_multicast_bytes.hi =
3645 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3646 old_tclient->rcv_multicast_bytes.lo =
3647 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3648 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3650 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3651 old_tclient->packets_too_big_discard =
3652 le32_to_cpu(tclient->packets_too_big_discard);
3653 estats->no_buff_discard =
3654 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3655 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3657 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3658 old_xclient->unicast_bytes_sent.hi =
3659 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3660 old_xclient->unicast_bytes_sent.lo =
3661 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3662 old_xclient->multicast_bytes_sent.hi =
3663 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3664 old_xclient->multicast_bytes_sent.lo =
3665 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3666 old_xclient->broadcast_bytes_sent.hi =
3667 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3668 old_xclient->broadcast_bytes_sent.lo =
3669 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3671 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3676 static void bnx2x_net_stats_update(struct bnx2x *bp)
3678 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3679 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3680 struct net_device_stats *nstats = &bp->dev->stats;
3682 nstats->rx_packets =
3683 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3684 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3685 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3687 nstats->tx_packets =
3688 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3689 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3690 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3692 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3694 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3696 nstats->rx_dropped = old_tclient->checksum_discard +
3697 estats->mac_discard;
3698 nstats->tx_dropped = 0;
3701 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3703 nstats->collisions =
3704 estats->tx_stat_dot3statssinglecollisionframes_lo +
3705 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3706 estats->tx_stat_dot3statslatecollisions_lo +
3707 estats->tx_stat_dot3statsexcessivecollisions_lo;
3709 estats->jabber_packets_received =
3710 old_tclient->packets_too_big_discard +
3711 estats->rx_stat_dot3statsframestoolong_lo;
3713 nstats->rx_length_errors =
3714 estats->rx_stat_etherstatsundersizepkts_lo +
3715 estats->jabber_packets_received;
3716 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3718 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3719 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3720 nstats->rx_missed_errors = estats->xxoverflow_discard;
3722 nstats->rx_errors = nstats->rx_length_errors +
3723 nstats->rx_over_errors +
3724 nstats->rx_crc_errors +
3725 nstats->rx_frame_errors +
3726 nstats->rx_fifo_errors +
3727 nstats->rx_missed_errors;
3729 nstats->tx_aborted_errors =
3730 estats->tx_stat_dot3statslatecollisions_lo +
3731 estats->tx_stat_dot3statsexcessivecollisions_lo;
3732 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3733 nstats->tx_fifo_errors = 0;
3734 nstats->tx_heartbeat_errors = 0;
3735 nstats->tx_window_errors = 0;
3737 nstats->tx_errors = nstats->tx_aborted_errors +
3738 nstats->tx_carrier_errors;
3741 static void bnx2x_stats_update(struct bnx2x *bp)
3743 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3746 if (*stats_comp != DMAE_COMP_VAL)
3750 update = (bnx2x_hw_stats_update(bp) == 0);
3752 update |= (bnx2x_storm_stats_update(bp) == 0);
3755 bnx2x_net_stats_update(bp);
3758 if (bp->stats_pending) {
3759 bp->stats_pending++;
3760 if (bp->stats_pending == 3) {
3761 BNX2X_ERR("stats not updated for 3 times\n");
3768 if (bp->msglevel & NETIF_MSG_TIMER) {
3769 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3770 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3771 struct net_device_stats *nstats = &bp->dev->stats;
3774 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3775 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3777 bnx2x_tx_avail(bp->fp),
3778 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3779 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3781 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3782 bp->fp->rx_comp_cons),
3783 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3784 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3785 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3786 estats->driver_xoff, estats->brb_drop_lo);
3787 printk(KERN_DEBUG "tstats: checksum_discard %u "
3788 "packets_too_big_discard %u no_buff_discard %u "
3789 "mac_discard %u mac_filter_discard %u "
3790 "xxovrflow_discard %u brb_truncate_discard %u "
3791 "ttl0_discard %u\n",
3792 old_tclient->checksum_discard,
3793 old_tclient->packets_too_big_discard,
3794 old_tclient->no_buff_discard, estats->mac_discard,
3795 estats->mac_filter_discard, estats->xxoverflow_discard,
3796 estats->brb_truncate_discard,
3797 old_tclient->ttl0_discard);
3799 for_each_queue(bp, i) {
3800 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3801 bnx2x_fp(bp, i, tx_pkt),
3802 bnx2x_fp(bp, i, rx_pkt),
3803 bnx2x_fp(bp, i, rx_calls));
3807 bnx2x_hw_stats_post(bp);
3808 bnx2x_storm_stats_post(bp);
3811 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3813 struct dmae_command *dmae;
3815 int loader_idx = PMF_DMAE_C(bp);
3816 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3818 bp->executer_idx = 0;
3820 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3822 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3824 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3826 DMAE_CMD_ENDIANITY_DW_SWAP |
3828 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3829 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3831 if (bp->port.port_stx) {
3833 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3835 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3837 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3838 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3839 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3840 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3841 dmae->dst_addr_hi = 0;
3842 dmae->len = sizeof(struct host_port_stats) >> 2;
3844 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3845 dmae->comp_addr_hi = 0;
3848 dmae->comp_addr_lo =
3849 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3850 dmae->comp_addr_hi =
3851 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3852 dmae->comp_val = DMAE_COMP_VAL;
3860 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3861 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3862 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3863 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3864 dmae->dst_addr_lo = bp->func_stx >> 2;
3865 dmae->dst_addr_hi = 0;
3866 dmae->len = sizeof(struct host_func_stats) >> 2;
3867 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3868 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3869 dmae->comp_val = DMAE_COMP_VAL;
3875 static void bnx2x_stats_stop(struct bnx2x *bp)
3879 bnx2x_stats_comp(bp);
3882 update = (bnx2x_hw_stats_update(bp) == 0);
3884 update |= (bnx2x_storm_stats_update(bp) == 0);
3887 bnx2x_net_stats_update(bp);
3890 bnx2x_port_stats_stop(bp);
3892 bnx2x_hw_stats_post(bp);
3893 bnx2x_stats_comp(bp);
3897 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3901 static const struct {
3902 void (*action)(struct bnx2x *bp);
3903 enum bnx2x_stats_state next_state;
3904 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3907 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3908 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3909 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3910 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3913 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3914 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3915 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3916 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3920 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3922 enum bnx2x_stats_state state = bp->stats_state;
3924 bnx2x_stats_stm[state][event].action(bp);
3925 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3927 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3928 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3929 state, event, bp->stats_state);
3932 static void bnx2x_timer(unsigned long data)
3934 struct bnx2x *bp = (struct bnx2x *) data;
3936 if (!netif_running(bp->dev))
3939 if (atomic_read(&bp->intr_sem) != 0)
3943 struct bnx2x_fastpath *fp = &bp->fp[0];
3946 bnx2x_tx_int(fp, 1000);
3947 rc = bnx2x_rx_int(fp, 1000);
3950 if (!BP_NOMCP(bp)) {
3951 int func = BP_FUNC(bp);
3955 ++bp->fw_drv_pulse_wr_seq;
3956 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3957 /* TBD - add SYSTEM_TIME */
3958 drv_pulse = bp->fw_drv_pulse_wr_seq;
3959 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3961 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3962 MCP_PULSE_SEQ_MASK);
3963 /* The delta between driver pulse and mcp response
3964 * should be 1 (before mcp response) or 0 (after mcp response)
3966 if ((drv_pulse != mcp_pulse) &&
3967 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3968 /* someone lost a heartbeat... */
3969 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3970 drv_pulse, mcp_pulse);
3974 if ((bp->state == BNX2X_STATE_OPEN) ||
3975 (bp->state == BNX2X_STATE_DISABLED))
3976 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3979 mod_timer(&bp->timer, jiffies + bp->current_interval);
3982 /* end of Statistics */
3987 * nic init service functions
3990 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3992 int port = BP_PORT(bp);
3994 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3995 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3996 sizeof(struct ustorm_status_block)/4);
3997 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3998 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3999 sizeof(struct cstorm_status_block)/4);
4002 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4003 dma_addr_t mapping, int sb_id)
4005 int port = BP_PORT(bp);
4006 int func = BP_FUNC(bp);
4011 section = ((u64)mapping) + offsetof(struct host_status_block,
4013 sb->u_status_block.status_block_id = sb_id;
4015 REG_WR(bp, BAR_USTRORM_INTMEM +
4016 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4017 REG_WR(bp, BAR_USTRORM_INTMEM +
4018 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4020 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4021 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4023 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4024 REG_WR16(bp, BAR_USTRORM_INTMEM +
4025 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4028 section = ((u64)mapping) + offsetof(struct host_status_block,
4030 sb->c_status_block.status_block_id = sb_id;
4032 REG_WR(bp, BAR_CSTRORM_INTMEM +
4033 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4034 REG_WR(bp, BAR_CSTRORM_INTMEM +
4035 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4037 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4038 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4040 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4041 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4042 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4044 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4047 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4049 int func = BP_FUNC(bp);
4051 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4052 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4053 sizeof(struct ustorm_def_status_block)/4);
4054 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4055 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4056 sizeof(struct cstorm_def_status_block)/4);
4057 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4058 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4059 sizeof(struct xstorm_def_status_block)/4);
4060 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4061 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4062 sizeof(struct tstorm_def_status_block)/4);
4065 static void bnx2x_init_def_sb(struct bnx2x *bp,
4066 struct host_def_status_block *def_sb,
4067 dma_addr_t mapping, int sb_id)
4069 int port = BP_PORT(bp);
4070 int func = BP_FUNC(bp);
4071 int index, val, reg_offset;
4075 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4076 atten_status_block);
4077 def_sb->atten_status_block.status_block_id = sb_id;
4081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4082 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4084 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4085 bp->attn_group[index].sig[0] = REG_RD(bp,
4086 reg_offset + 0x10*index);
4087 bp->attn_group[index].sig[1] = REG_RD(bp,
4088 reg_offset + 0x4 + 0x10*index);
4089 bp->attn_group[index].sig[2] = REG_RD(bp,
4090 reg_offset + 0x8 + 0x10*index);
4091 bp->attn_group[index].sig[3] = REG_RD(bp,
4092 reg_offset + 0xc + 0x10*index);
4095 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4096 HC_REG_ATTN_MSG0_ADDR_L);
4098 REG_WR(bp, reg_offset, U64_LO(section));
4099 REG_WR(bp, reg_offset + 4, U64_HI(section));
4101 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4103 val = REG_RD(bp, reg_offset);
4105 REG_WR(bp, reg_offset, val);
4108 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4109 u_def_status_block);
4110 def_sb->u_def_status_block.status_block_id = sb_id;
4112 REG_WR(bp, BAR_USTRORM_INTMEM +
4113 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4114 REG_WR(bp, BAR_USTRORM_INTMEM +
4115 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4117 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4118 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4120 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4121 REG_WR16(bp, BAR_USTRORM_INTMEM +
4122 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4125 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4126 c_def_status_block);
4127 def_sb->c_def_status_block.status_block_id = sb_id;
4129 REG_WR(bp, BAR_CSTRORM_INTMEM +
4130 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4131 REG_WR(bp, BAR_CSTRORM_INTMEM +
4132 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4134 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4135 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4137 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4138 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4139 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4142 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4143 t_def_status_block);
4144 def_sb->t_def_status_block.status_block_id = sb_id;
4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
4147 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4148 REG_WR(bp, BAR_TSTRORM_INTMEM +
4149 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4151 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4152 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4154 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4155 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4156 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4159 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4160 x_def_status_block);
4161 def_sb->x_def_status_block.status_block_id = sb_id;
4163 REG_WR(bp, BAR_XSTRORM_INTMEM +
4164 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4165 REG_WR(bp, BAR_XSTRORM_INTMEM +
4166 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4168 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4169 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4171 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4172 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4173 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4175 bp->stats_pending = 0;
4176 bp->set_mac_pending = 0;
4178 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4181 static void bnx2x_update_coalesce(struct bnx2x *bp)
4183 int port = BP_PORT(bp);
4186 for_each_queue(bp, i) {
4187 int sb_id = bp->fp[i].sb_id;
4189 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4190 REG_WR8(bp, BAR_USTRORM_INTMEM +
4191 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4192 U_SB_ETH_RX_CQ_INDEX),
4194 REG_WR16(bp, BAR_USTRORM_INTMEM +
4195 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4196 U_SB_ETH_RX_CQ_INDEX),
4197 bp->rx_ticks ? 0 : 1);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 U_SB_ETH_RX_BD_INDEX),
4201 bp->rx_ticks ? 0 : 1);
4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4206 C_SB_ETH_TX_CQ_INDEX),
4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210 C_SB_ETH_TX_CQ_INDEX),
4211 bp->tx_ticks ? 0 : 1);
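/* Illustrative note, not part of the original source: the TIMEOUT offsets
 * above are written with values derived from bp->rx_ticks / bp->tx_ticks
 * (the exact scaling is not shown in this excerpt), while the matching
 * HC_DISABLE offsets receive 1 whenever the tick value is 0.  In other
 * words, a zero rx_ticks/tx_ticks setting switches host coalescing off for
 * that status-block index; the tick values themselves are presumably filled
 * in elsewhere (e.g. from ethtool coalesce parameters) before this
 * function runs. */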
4215 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4216 struct bnx2x_fastpath *fp, int last)
4220 for (i = 0; i < last; i++) {
4221 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4222 struct sk_buff *skb = rx_buf->skb;
4225 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4229 if (fp->tpa_state[i] == BNX2X_TPA_START)
4230 pci_unmap_single(bp->pdev,
4231 pci_unmap_addr(rx_buf, mapping),
4232 bp->rx_buf_use_size,
4233 PCI_DMA_FROMDEVICE);
4240 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4242 int func = BP_FUNC(bp);
4243 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244 ETH_MAX_AGGREGATION_QUEUES_E1H;
4245 u16 ring_prod, cqe_ring_prod;
4248 bp->rx_buf_use_size = bp->dev->mtu;
4249 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4250 bp->rx_buf_size = bp->rx_buf_use_size + 64;
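/* Illustrative note, not part of the original source: rx_buf_use_size is
 * the portion of the buffer actually reported to the chip (MTU plus
 * rx_offset and the Ethernet overhead constant), while rx_buf_size adds a
 * further 64 bytes of slack on top of it - presumably headroom for
 * alignment, since only rx_buf_use_size is later programmed as the BD
 * buffer size in bnx2x_init_context(). */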
4252 if (bp->flags & TPA_ENABLE_FLAG) {
4254 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4255 bp->rx_buf_use_size, bp->rx_buf_size,
4256 bp->dev->mtu + ETH_OVREHEAD);
4258 for_each_queue(bp, j) {
4259 struct bnx2x_fastpath *fp = &bp->fp[j];
4261 for (i = 0; i < max_agg_queues; i++) {
4262 fp->tpa_pool[i].skb =
4263 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4264 if (!fp->tpa_pool[i].skb) {
4265 BNX2X_ERR("Failed to allocate TPA "
4266 "skb pool for queue[%d] - "
4267 "disabling TPA on this "
4269 bnx2x_free_tpa_pool(bp, fp, i);
4270 fp->disable_tpa = 1;
4273 pci_unmap_addr_set((struct sw_rx_bd *)
4274 &bp->fp->tpa_pool[i],
4276 fp->tpa_state[i] = BNX2X_TPA_STOP;
4281 for_each_queue(bp, j) {
4282 struct bnx2x_fastpath *fp = &bp->fp[j];
4285 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4286 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4288 /* "next page" elements initialization */
4290 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4291 struct eth_rx_sge *sge;
4293 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4295 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4296 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4298 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4299 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4302 bnx2x_init_sge_ring_bit_mask(fp);
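/* Illustrative note, not part of the original source: the loop above chains
 * the SGE ring pages together.  The element at index RX_SGE_CNT * i - 2 of
 * each page is loaded with the 64-bit DMA address of the following page
 * (split into hi/lo words), and the "i % NUM_RX_SGE_PAGES" wrap makes the
 * final page point back at the first, so the hardware sees one circular
 * ring.  The BD ring loop below uses the same pattern, and the RCQ ring
 * does the analogous thing with a next-page descriptor at the end of each
 * page. */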
4305 for (i = 1; i <= NUM_RX_RINGS; i++) {
4306 struct eth_rx_bd *rx_bd;
4308 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4310 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4311 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4313 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4314 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4318 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4319 struct eth_rx_cqe_next_page *nextpg;
4321 nextpg = (struct eth_rx_cqe_next_page *)
4322 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4324 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4325 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4327 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4328 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4331 /* Allocate SGEs and initialize the ring elements */
4332 for (i = 0, ring_prod = 0;
4333 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4335 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4336 BNX2X_ERR("was only able to allocate "
4338 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4339 /* Cleanup already allocated elements */
4340 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4341 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4342 fp->disable_tpa = 1;
4346 ring_prod = NEXT_SGE_IDX(ring_prod);
4348 fp->rx_sge_prod = ring_prod;
4350 /* Allocate BDs and initialize BD ring */
4351 fp->rx_comp_cons = 0;
4352 cqe_ring_prod = ring_prod = 0;
4353 for (i = 0; i < bp->rx_ring_size; i++) {
4354 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4355 BNX2X_ERR("was only able to allocate "
4357 bp->eth_stats.rx_skb_alloc_failed++;
4360 ring_prod = NEXT_RX_IDX(ring_prod);
4361 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4362 WARN_ON(ring_prod <= i);
4365 fp->rx_bd_prod = ring_prod;
4366 /* must not have more available CQEs than BDs */
4367 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4369 fp->rx_pkt = fp->rx_calls = 0;
4372 /* this will generate an interrupt (to the TSTORM);
4373 * it must only be done after the chip is initialized */
4375 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4380 REG_WR(bp, BAR_USTRORM_INTMEM +
4381 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4382 U64_LO(fp->rx_comp_mapping));
4383 REG_WR(bp, BAR_USTRORM_INTMEM +
4384 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4385 U64_HI(fp->rx_comp_mapping));
4389 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4393 for_each_queue(bp, j) {
4394 struct bnx2x_fastpath *fp = &bp->fp[j];
4396 for (i = 1; i <= NUM_TX_RINGS; i++) {
4397 struct eth_tx_bd *tx_bd =
4398 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4401 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4402 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4404 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4405 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4408 fp->tx_pkt_prod = 0;
4409 fp->tx_pkt_cons = 0;
4412 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4417 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4419 int func = BP_FUNC(bp);
4421 spin_lock_init(&bp->spq_lock);
4423 bp->spq_left = MAX_SPQ_PENDING;
4424 bp->spq_prod_idx = 0;
4425 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4426 bp->spq_prod_bd = bp->spq;
4427 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4429 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4430 U64_LO(bp->spq_mapping));
4432 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4433 U64_HI(bp->spq_mapping));
4435 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4439 static void bnx2x_init_context(struct bnx2x *bp)
4443 for_each_queue(bp, i) {
4444 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4445 struct bnx2x_fastpath *fp = &bp->fp[i];
4446 u8 sb_id = FP_SB_ID(fp);
4448 context->xstorm_st_context.tx_bd_page_base_hi =
4449 U64_HI(fp->tx_desc_mapping);
4450 context->xstorm_st_context.tx_bd_page_base_lo =
4451 U64_LO(fp->tx_desc_mapping);
4452 context->xstorm_st_context.db_data_addr_hi =
4453 U64_HI(fp->tx_prods_mapping);
4454 context->xstorm_st_context.db_data_addr_lo =
4455 U64_LO(fp->tx_prods_mapping);
4456 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4457 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4459 context->ustorm_st_context.common.sb_index_numbers =
4460 BNX2X_RX_SB_INDEX_NUM;
4461 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4462 context->ustorm_st_context.common.status_block_id = sb_id;
4463 context->ustorm_st_context.common.flags =
4464 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4465 context->ustorm_st_context.common.mc_alignment_size = 64;
4466 context->ustorm_st_context.common.bd_buff_size =
4467 bp->rx_buf_use_size;
4468 context->ustorm_st_context.common.bd_page_base_hi =
4469 U64_HI(fp->rx_desc_mapping);
4470 context->ustorm_st_context.common.bd_page_base_lo =
4471 U64_LO(fp->rx_desc_mapping);
4472 if (!fp->disable_tpa) {
4473 context->ustorm_st_context.common.flags |=
4474 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4475 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4476 context->ustorm_st_context.common.sge_buff_size =
4477 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4478 context->ustorm_st_context.common.sge_page_base_hi =
4479 U64_HI(fp->rx_sge_mapping);
4480 context->ustorm_st_context.common.sge_page_base_lo =
4481 U64_LO(fp->rx_sge_mapping);
4484 context->cstorm_st_context.sb_index_number =
4485 C_SB_ETH_TX_CQ_INDEX;
4486 context->cstorm_st_context.status_block_id = sb_id;
4488 context->xstorm_ag_context.cdu_reserved =
4489 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4490 CDU_REGION_NUMBER_XCM_AG,
4491 ETH_CONNECTION_TYPE);
4492 context->ustorm_ag_context.cdu_usage =
4493 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4494 CDU_REGION_NUMBER_UCM_AG,
4495 ETH_CONNECTION_TYPE);
4499 static void bnx2x_init_ind_table(struct bnx2x *bp)
4501 int port = BP_PORT(bp);
4507 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4508 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4509 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4510 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4511 i % bp->num_queues);
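/* Illustrative example, not part of the original source: with
 * bp->num_queues == 4 the TSTORM indirection table is filled with the
 * repeating pattern 0,1,2,3,0,1,2,3,... so hash results are spread
 * round-robin over the active queues. */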
4513 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4516 static void bnx2x_set_client_config(struct bnx2x *bp)
4518 struct tstorm_eth_client_config tstorm_client = {0};
4519 int port = BP_PORT(bp);
4522 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4523 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4524 tstorm_client.config_flags =
4525 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4527 if (bp->rx_mode && bp->vlgrp) {
4528 tstorm_client.config_flags |=
4529 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4530 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4534 if (bp->flags & TPA_ENABLE_FLAG) {
4535 tstorm_client.max_sges_for_packet =
4536 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4537 tstorm_client.max_sges_for_packet =
4538 ((tstorm_client.max_sges_for_packet +
4539 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4540 PAGES_PER_SGE_SHIFT;
4542 tstorm_client.config_flags |=
4543 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
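/* Illustrative arithmetic, not part of the original source, assuming a 4K
 * BCM page and PAGES_PER_SGE == 2 (both defined elsewhere in the driver):
 * for an illustrative tstorm_client.mtu of 9000 bytes, BCM_PAGE_ALIGN(9000)
 * >> BCM_PAGE_SHIFT gives 3 pages, rounding up to a multiple of
 * PAGES_PER_SGE gives 4, and the final shift by PAGES_PER_SGE_SHIFT (1)
 * yields 2 SGEs per aggregated packet. */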
4546 for_each_queue(bp, i) {
4547 REG_WR(bp, BAR_TSTRORM_INTMEM +
4548 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4549 ((u32 *)&tstorm_client)[0]);
4550 REG_WR(bp, BAR_TSTRORM_INTMEM +
4551 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4552 ((u32 *)&tstorm_client)[1]);
4555 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4556 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4559 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4561 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4562 int mode = bp->rx_mode;
4563 int mask = (1 << BP_L_ID(bp));
4564 int func = BP_FUNC(bp);
4567 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4570 case BNX2X_RX_MODE_NONE: /* no Rx */
4571 tstorm_mac_filter.ucast_drop_all = mask;
4572 tstorm_mac_filter.mcast_drop_all = mask;
4573 tstorm_mac_filter.bcast_drop_all = mask;
4575 case BNX2X_RX_MODE_NORMAL:
4576 tstorm_mac_filter.bcast_accept_all = mask;
4578 case BNX2X_RX_MODE_ALLMULTI:
4579 tstorm_mac_filter.mcast_accept_all = mask;
4580 tstorm_mac_filter.bcast_accept_all = mask;
4582 case BNX2X_RX_MODE_PROMISC:
4583 tstorm_mac_filter.ucast_accept_all = mask;
4584 tstorm_mac_filter.mcast_accept_all = mask;
4585 tstorm_mac_filter.bcast_accept_all = mask;
4588 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4592 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4593 REG_WR(bp, BAR_TSTRORM_INTMEM +
4594 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4595 ((u32 *)&tstorm_mac_filter)[i]);
4597 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4598 ((u32 *)&tstorm_mac_filter)[i]); */
4601 if (mode != BNX2X_RX_MODE_NONE)
4602 bnx2x_set_client_config(bp);
4605 static void bnx2x_init_internal_common(struct bnx2x *bp)
4609 /* Zero this manually as its initialization is
4610 currently missing in the initTool */
4611 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4612 REG_WR(bp, BAR_USTRORM_INTMEM +
4613 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4616 static void bnx2x_init_internal_port(struct bnx2x *bp)
4618 int port = BP_PORT(bp);
4620 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4626 static void bnx2x_init_internal_func(struct bnx2x *bp)
4628 struct tstorm_eth_function_common_config tstorm_config = {0};
4629 struct stats_indication_flags stats_flags = {0};
4630 int port = BP_PORT(bp);
4631 int func = BP_FUNC(bp);
4636 tstorm_config.config_flags = MULTI_FLAGS;
4637 tstorm_config.rss_result_mask = MULTI_MASK;
4640 tstorm_config.leading_client_id = BP_L_ID(bp);
4642 REG_WR(bp, BAR_TSTRORM_INTMEM +
4643 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4644 (*(u32 *)&tstorm_config));
4646 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4647 bnx2x_set_storm_rx_mode(bp);
4649 /* reset xstorm per client statistics */
4650 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4651 REG_WR(bp, BAR_XSTRORM_INTMEM +
4652 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4655 /* reset tstorm per client statistics */
4656 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4657 REG_WR(bp, BAR_TSTRORM_INTMEM +
4658 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4662 /* Init statistics related context */
4663 stats_flags.collect_eth = 1;
4665 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4666 ((u32 *)&stats_flags)[0]);
4667 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4668 ((u32 *)&stats_flags)[1]);
4670 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4671 ((u32 *)&stats_flags)[0]);
4672 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4673 ((u32 *)&stats_flags)[1]);
4675 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4676 ((u32 *)&stats_flags)[0]);
4677 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4678 ((u32 *)&stats_flags)[1]);
4680 REG_WR(bp, BAR_XSTRORM_INTMEM +
4681 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4682 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4683 REG_WR(bp, BAR_XSTRORM_INTMEM +
4684 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4685 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4687 REG_WR(bp, BAR_TSTRORM_INTMEM +
4688 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4689 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4690 REG_WR(bp, BAR_TSTRORM_INTMEM +
4691 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4692 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4694 if (CHIP_IS_E1H(bp)) {
4695 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4697 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4699 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4701 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4704 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4708 /* Init CQ ring mapping and aggregation size */
4709 max_agg_size = min((u32)(bp->rx_buf_use_size +
4710 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4712 for_each_queue(bp, i) {
4713 struct bnx2x_fastpath *fp = &bp->fp[i];
4715 REG_WR(bp, BAR_USTRORM_INTMEM +
4716 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4717 U64_LO(fp->rx_comp_mapping));
4718 REG_WR(bp, BAR_USTRORM_INTMEM +
4719 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4720 U64_HI(fp->rx_comp_mapping));
4722 REG_WR16(bp, BAR_USTRORM_INTMEM +
4723 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4728 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4730 switch (load_code) {
4731 case FW_MSG_CODE_DRV_LOAD_COMMON:
4732 bnx2x_init_internal_common(bp);
4735 case FW_MSG_CODE_DRV_LOAD_PORT:
4736 bnx2x_init_internal_port(bp);
4739 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4740 bnx2x_init_internal_func(bp);
4744 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4749 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4753 for_each_queue(bp, i) {
4754 struct bnx2x_fastpath *fp = &bp->fp[i];
4757 fp->state = BNX2X_FP_STATE_CLOSED;
4759 fp->cl_id = BP_L_ID(bp) + i;
4760 fp->sb_id = fp->cl_id;
4762 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4763 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4764 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4766 bnx2x_update_fpsb_idx(fp);
4769 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4771 bnx2x_update_dsb_idx(bp);
4772 bnx2x_update_coalesce(bp);
4773 bnx2x_init_rx_rings(bp);
4774 bnx2x_init_tx_ring(bp);
4775 bnx2x_init_sp_ring(bp);
4776 bnx2x_init_context(bp);
4777 bnx2x_init_internal(bp, load_code);
4778 bnx2x_init_ind_table(bp);
4779 bnx2x_int_enable(bp);
4782 /* end of nic init */
4785 * gzip service functions
4788 static int bnx2x_gunzip_init(struct bnx2x *bp)
4790 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4791 &bp->gunzip_mapping);
4792 if (bp->gunzip_buf == NULL)
4795 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4796 if (bp->strm == NULL)
4799 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4801 if (bp->strm->workspace == NULL)
4811 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4812 bp->gunzip_mapping);
4813 bp->gunzip_buf = NULL;
4816 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4817 " un-compression\n", bp->dev->name);
4821 static void bnx2x_gunzip_end(struct bnx2x *bp)
4823 kfree(bp->strm->workspace);
4828 if (bp->gunzip_buf) {
4829 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4830 bp->gunzip_mapping);
4831 bp->gunzip_buf = NULL;
4835 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4839 /* check gzip header */
4840 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4847 if (zbuf[3] & FNAME)
4848 while ((zbuf[n++] != 0) && (n < len));
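/* Note, not part of the original source: the checks above follow the gzip
 * file format - bytes 0 and 1 are the magic 0x1f 0x8b, byte 2 is the
 * compression method (Z_DEFLATED == 8) and byte 3 holds the flag bits.
 * When FNAME is set, a NUL-terminated original file name follows the fixed
 * 10-byte header, which is what the while loop skips before the raw
 * deflate stream is handed to zlib_inflate with a negative window-bits
 * value (i.e. no zlib header expected). */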
4850 bp->strm->next_in = zbuf + n;
4851 bp->strm->avail_in = len - n;
4852 bp->strm->next_out = bp->gunzip_buf;
4853 bp->strm->avail_out = FW_BUF_SIZE;
4855 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4859 rc = zlib_inflate(bp->strm, Z_FINISH);
4860 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4861 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4862 bp->dev->name, bp->strm->msg);
4864 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4865 if (bp->gunzip_outlen & 0x3)
4866 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4867 " gunzip_outlen (%d) not aligned\n",
4868 bp->dev->name, bp->gunzip_outlen);
4869 bp->gunzip_outlen >>= 2;
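/* Note, not part of the original source: gunzip_outlen is first checked for
 * 4-byte alignment and then converted from bytes to 32-bit words (the
 * ">>= 2"), presumably because the callers write the decompressed image to
 * the chip one u32 at a time. */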
4871 zlib_inflateEnd(bp->strm);
4873 if (rc == Z_STREAM_END)
4879 /* nic load/unload */
4882 * General service functions
4885 /* send a NIG loopback debug packet */
4886 static void bnx2x_lb_pckt(struct bnx2x *bp)
4890 /* Ethernet source and destination addresses */
4891 wb_write[0] = 0x55555555;
4892 wb_write[1] = 0x55555555;
4893 wb_write[2] = 0x20; /* SOP */
4894 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4896 /* NON-IP protocol */
4897 wb_write[0] = 0x09000000;
4898 wb_write[1] = 0x55555555;
4899 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4900 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4903 /* some of the internal memories
4904 * are not directly readable from the driver;
4905 * to test them we send debug packets */
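/* Summary (not in the original source, derived only from the code below):
 * the parser neighbor inputs are disabled and the CFC search credits
 * zeroed, a loopback packet is injected via NIG_REG_DEBUG_PACKET_LB, and
 * the NIG byte counter and PRS packet counter are polled for the expected
 * values; BRB and PRS are then reset and the exercise repeated with ten
 * packets before the inputs are re-enabled. */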
4907 static int bnx2x_int_mem_test(struct bnx2x *bp)
4913 if (CHIP_REV_IS_FPGA(bp))
4915 else if (CHIP_REV_IS_EMUL(bp))
4920 DP(NETIF_MSG_HW, "start part1\n");
4922 /* Disable inputs of parser neighbor blocks */
4923 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4924 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4925 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4926 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4928 /* Write 0 to parser credits for CFC search request */
4929 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4931 /* send Ethernet packet */
4934 /* TODO: should the NIG statistics be reset here? */
4935 /* Wait until NIG register shows 1 packet of size 0x10 */
4936 count = 1000 * factor;
4939 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4940 val = *bnx2x_sp(bp, wb_data[0]);
4948 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4952 /* Wait until PRS register shows 1 packet */
4953 count = 1000 * factor;
4955 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4963 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4967 /* Reset and init BRB, PRS */
4968 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4970 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4972 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4973 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4975 DP(NETIF_MSG_HW, "part2\n");
4977 /* Disable inputs of parser neighbor blocks */
4978 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4979 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4980 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4981 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4983 /* Write 0 to parser credits for CFC search request */
4984 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4986 /* send 10 Ethernet packets */
4987 for (i = 0; i < 10; i++)
4990 /* Wait until NIG register shows 10 + 1
4991 packets of size 11*0x10 = 0xb0 */
4992 count = 1000 * factor;
4995 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996 val = *bnx2x_sp(bp, wb_data[0]);
5004 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5008 /* Wait until PRS register shows 2 packets */
5009 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5011 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5013 /* Write 1 to parser credits for CFC search request */
5014 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5016 /* Wait until PRS register shows 3 packets */
5017 msleep(10 * factor);
5018 /* Wait until NIG register shows 1 packet of size 0x10 */
5019 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5021 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5023 /* clear NIG EOP FIFO */
5024 for (i = 0; i < 11; i++)
5025 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5026 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5028 BNX2X_ERR("clear of NIG failed\n");
5032 /* Reset and init BRB, PRS, NIG */
5033 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5035 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5037 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5038 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5041 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5044 /* Enable inputs of parser neighbor blocks */
5045 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5046 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5047 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5048 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5050 DP(NETIF_MSG_HW, "done\n");
5055 static void enable_blocks_attention(struct bnx2x *bp)
5057 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5058 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5059 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5060 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5061 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5062 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5063 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5064 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5065 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5066 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5067 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5068 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5069 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5070 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5071 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5072 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5073 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5074 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5075 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5076 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5077 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5078 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5079 if (CHIP_REV_IS_FPGA(bp))
5080 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5082 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5083 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5084 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5085 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5086 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5087 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5088 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5089 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5090 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5091 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bits 3 and 4 masked */
5095 static int bnx2x_init_common(struct bnx2x *bp)
5099 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5101 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5102 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5104 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5105 if (CHIP_IS_E1H(bp))
5106 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5108 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5110 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5112 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5113 if (CHIP_IS_E1(bp)) {
5114 /* enable HW interrupt from PXP on USDM overflow
5115 bit 16 on INT_MASK_0 */
5116 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5119 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5123 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5124 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5125 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5126 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5127 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5128 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5130 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5131 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5132 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5133 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5134 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5137 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5139 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5140 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5141 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5144 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5145 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5147 /* let the HW do its magic ... */
5149 /* finish PXP init */
5150 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5152 BNX2X_ERR("PXP2 CFG failed\n");
5155 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5157 BNX2X_ERR("PXP2 RD_INIT failed\n");
5161 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5162 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5164 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5166 /* clean the DMAE memory */
5168 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5170 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5171 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5172 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5173 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5175 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5176 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5177 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5178 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5180 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5181 /* soft reset pulse */
5182 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5183 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5186 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5189 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5190 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5191 if (!CHIP_REV_IS_SLOW(bp)) {
5192 /* enable hw interrupt from doorbell Q */
5193 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5196 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5197 if (CHIP_REV_IS_SLOW(bp)) {
5198 /* fix for emulation and FPGA for no pause */
5199 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5200 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5201 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5202 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5205 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5207 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5208 if (CHIP_IS_E1H(bp))
5209 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5211 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5212 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5213 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5214 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5216 if (CHIP_IS_E1H(bp)) {
5217 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5218 STORM_INTMEM_SIZE_E1H/2);
5220 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221 0, STORM_INTMEM_SIZE_E1H/2);
5222 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5223 STORM_INTMEM_SIZE_E1H/2);
5225 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226 0, STORM_INTMEM_SIZE_E1H/2);
5227 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5228 STORM_INTMEM_SIZE_E1H/2);
5230 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231 0, STORM_INTMEM_SIZE_E1H/2);
5232 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5233 STORM_INTMEM_SIZE_E1H/2);
5235 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5236 0, STORM_INTMEM_SIZE_E1H/2);
5238 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5239 STORM_INTMEM_SIZE_E1);
5240 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5241 STORM_INTMEM_SIZE_E1);
5242 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5243 STORM_INTMEM_SIZE_E1);
5244 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1);
5248 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5249 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5250 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5251 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5254 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5256 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5259 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5260 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5261 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5263 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5264 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5265 REG_WR(bp, i, 0xc0cac01a);
5266 /* TODO: replace with something meaningful */
5268 if (CHIP_IS_E1H(bp))
5269 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5270 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5272 if (sizeof(union cdu_context) != 1024)
5273 /* we currently assume that a context is 1024 bytes */
5274 printk(KERN_ALERT PFX "please adjust the size of"
5275 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5277 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5278 val = (4 << 24) + (0 << 12) + 1024;
5279 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5280 if (CHIP_IS_E1(bp)) {
5281 /* !!! fix pxp client credit until excel update */
5282 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5283 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5286 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5287 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5289 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5290 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5292 /* PXPCS COMMON comes here */
5293 /* Reset PCIE errors for debug */
5294 REG_WR(bp, 0x2814, 0xffffffff);
5295 REG_WR(bp, 0x3820, 0xffffffff);
5297 /* EMAC0 COMMON comes here */
5298 /* EMAC1 COMMON comes here */
5299 /* DBU COMMON comes here */
5300 /* DBG COMMON comes here */
5302 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5303 if (CHIP_IS_E1H(bp)) {
5304 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5305 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5308 if (CHIP_REV_IS_SLOW(bp))
5311 /* finish CFC init */
5312 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5314 BNX2X_ERR("CFC LL_INIT failed\n");
5317 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5319 BNX2X_ERR("CFC AC_INIT failed\n");
5322 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5324 BNX2X_ERR("CFC CAM_INIT failed\n");
5327 REG_WR(bp, CFC_REG_DEBUG0, 0);
5329 /* read the NIG statistic
5330 to see if this is the first bring-up since power-up */
5331 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5332 val = *bnx2x_sp(bp, wb_data[0]);
5334 /* do internal memory self test */
5335 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5336 BNX2X_ERR("internal mem self test failed\n");
5340 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5341 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5342 /* Fan failure is indicated by SPIO 5 */
5343 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5344 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5346 /* set to active low mode */
5347 val = REG_RD(bp, MISC_REG_SPIO_INT);
5348 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5349 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5350 REG_WR(bp, MISC_REG_SPIO_INT, val);
5352 /* enable interrupt to signal the IGU */
5353 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5354 val |= (1 << MISC_REGISTERS_SPIO_5);
5355 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5362 /* clear PXP2 attentions */
5363 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5365 enable_blocks_attention(bp);
5367 if (bp->flags & TPA_ENABLE_FLAG) {
5368 struct tstorm_eth_tpa_exist tmp = {0};
5372 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5374 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5378 if (!BP_NOMCP(bp)) {
5379 bnx2x_acquire_phy_lock(bp);
5380 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5381 bnx2x_release_phy_lock(bp);
5383 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5388 static int bnx2x_init_port(struct bnx2x *bp)
5390 int port = BP_PORT(bp);
5393 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5395 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5397 /* Port PXP comes here */
5398 /* Port PXP2 comes here */
5403 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5404 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5405 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5406 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5411 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5412 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5413 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5414 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5419 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5420 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5421 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5422 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5424 /* Port CMs come here */
5426 /* Port QM comes here */
5428 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5429 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5431 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5432 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5434 /* Port DQ comes here */
5435 /* Port BRB1 comes here */
5436 /* Port PRS comes here */
5437 /* Port TSDM comes here */
5438 /* Port CSDM comes here */
5439 /* Port USDM comes here */
5440 /* Port XSDM comes here */
5441 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5442 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5443 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5444 port ? USEM_PORT1_END : USEM_PORT0_END);
5445 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5446 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5447 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5448 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5449 /* Port UPB comes here */
5450 /* Port XPB comes here */
5452 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5453 port ? PBF_PORT1_END : PBF_PORT0_END);
5455 /* configure PBF to work without PAUSE for MTU 9000 */
5456 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5458 /* update threshold */
5459 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5460 /* update init credit */
5461 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5464 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5466 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
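/* Note, not part of the original source: the threshold and credit values
 * above appear to be programmed in 16-byte units, so 9040/16 == 565 units
 * covers a 9000-byte jumbo frame plus header overhead, in line with the
 * "MTU 9000" comment.  The extra "+ 553 - 22" applied to the init credit
 * is kept as in the original; its derivation is not explained here. */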
5469 /* tell the searcher where the T2 table is */
5470 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5472 wb_write[0] = U64_LO(bp->t2_mapping);
5473 wb_write[1] = U64_HI(bp->t2_mapping);
5474 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5475 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5476 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5477 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5479 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5480 /* Port SRCH comes here */
5482 /* Port CDU comes here */
5483 /* Port CFC comes here */
5485 if (CHIP_IS_E1(bp)) {
5486 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5487 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5489 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5490 port ? HC_PORT1_END : HC_PORT0_END);
5492 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5493 MISC_AEU_PORT0_START,
5494 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5495 /* init aeu_mask_attn_func_0/1:
5496 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5497 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5498 * bits 4-7 are used for "per vn group attention" */
5499 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5500 (IS_E1HMF(bp) ? 0xF7 : 0x7));
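/* Illustrative bit breakdown, not part of the original source: 0x7 enables
 * attention groups 0-2 (single-function mode), while 0xF7 additionally
 * enables bits 4-7 for the per-VN group attentions and still leaves bit 3
 * masked, exactly as the comment above describes. */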
5502 /* Port PXPCS comes here */
5503 /* Port EMAC0 comes here */
5504 /* Port EMAC1 comes here */
5505 /* Port DBU comes here */
5506 /* Port DBG comes here */
5507 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5508 port ? NIG_PORT1_END : NIG_PORT0_END);
5510 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5512 if (CHIP_IS_E1H(bp)) {
5514 struct cmng_struct_per_port m_cmng_port;
5517 /* 0x2 disable e1hov, 0x1 enable */
5518 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5519 (IS_E1HMF(bp) ? 0x1 : 0x2));
5521 /* Init RATE SHAPING and FAIRNESS contexts.
5522 Initialize as if there is a 10G link. */
5523 wsum = bnx2x_calc_vn_wsum(bp);
5524 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5526 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5527 bnx2x_init_vn_minmax(bp, 2*vn + port,
5528 wsum, 10000, &m_cmng_port);
5531 /* Port MCP comes here */
5532 /* Port DMAE comes here */
5534 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5535 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5536 /* add SPIO 5 to group 0 */
5537 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5538 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5539 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5546 bnx2x__link_reset(bp);
5551 #define ILT_PER_FUNC (768/2)
5552 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5553 /* the phys address is shifted right 12 bits and a valid bit (1) is
5554 added as the 53rd bit;
5555 then, since this is a wide register(TM),
5556 we split it into two 32-bit writes */
5558 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5559 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5560 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5561 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
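/* Worked example, not part of the original source: for an illustrative DMA
 * address of 0x0000001234567000, ONCHIP_ADDR1() yields 0x01234567 (the
 * address shifted right by 12) and ONCHIP_ADDR2() yields 0x00100000 (the
 * valid bit at position 20 of the high word, with address bits above 44
 * being zero here); the two halves are then written back-to-back into the
 * wide ILT register.  PXP_ONE_ILT(x) simply packs the same index into both
 * the first and last fields of a one-entry range. */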
5563 #define CNIC_ILT_LINES 0
5565 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5569 if (CHIP_IS_E1H(bp))
5570 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5572 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5574 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5577 static int bnx2x_init_func(struct bnx2x *bp)
5579 int port = BP_PORT(bp);
5580 int func = BP_FUNC(bp);
5583 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5585 i = FUNC_ILT_BASE(func);
5587 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5588 if (CHIP_IS_E1H(bp)) {
5589 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5590 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5592 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5593 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5596 if (CHIP_IS_E1H(bp)) {
5597 for (i = 0; i < 9; i++)
5598 bnx2x_init_block(bp,
5599 cm_start[func][i], cm_end[func][i]);
5601 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5602 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5605 /* HC init per function */
5606 if (CHIP_IS_E1H(bp)) {
5607 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5609 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5610 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5612 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5614 if (CHIP_IS_E1H(bp))
5615 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5617 /* Reset PCIE errors for debug */
5618 REG_WR(bp, 0x2114, 0xffffffff);
5619 REG_WR(bp, 0x2120, 0xffffffff);
5624 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5628 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5629 BP_FUNC(bp), load_code);
5632 mutex_init(&bp->dmae_mutex);
5633 bnx2x_gunzip_init(bp);
5635 switch (load_code) {
5636 case FW_MSG_CODE_DRV_LOAD_COMMON:
5637 rc = bnx2x_init_common(bp);
5642 case FW_MSG_CODE_DRV_LOAD_PORT:
5644 rc = bnx2x_init_port(bp);
5649 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5651 rc = bnx2x_init_func(bp);
5657 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5661 if (!BP_NOMCP(bp)) {
5662 int func = BP_FUNC(bp);
5664 bp->fw_drv_pulse_wr_seq =
5665 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5666 DRV_PULSE_SEQ_MASK);
5667 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5668 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5669 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5673 /* this needs to be done before gunzip end */
5674 bnx2x_zero_def_sb(bp);
5675 for_each_queue(bp, i)
5676 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5679 bnx2x_gunzip_end(bp);
5684 /* send the MCP a request, block until there is a reply */
5685 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5687 int func = BP_FUNC(bp);
5688 u32 seq = ++bp->fw_seq;
5691 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5693 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5694 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5697 /* let the FW do its magic ... */
5700 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5702 /* Give the FW up to 2 seconds (200*10ms) */
5703 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5705 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5706 cnt*delay, rc, seq);
5708 /* is this a reply to our command? */
5709 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5710 rc &= FW_MSG_CODE_MASK;
5714 BNX2X_ERR("FW failed to respond!\n");
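/* Note, not part of the original source: the handshake above is purely
 * sequence-number based - each request carries ++bp->fw_seq in the low
 * bits of the mailbox word, and a reply is recognised only when the same
 * sequence is read back from fw_mb_header, in which case the
 * FW_MSG_CODE_MASK bits are returned to the caller; otherwise the
 * "FW failed to respond" error above is reported. */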
5722 static void bnx2x_free_mem(struct bnx2x *bp)
5725 #define BNX2X_PCI_FREE(x, y, size) \
5728 pci_free_consistent(bp->pdev, size, x, y); \
5734 #define BNX2X_FREE(x) \
5745 for_each_queue(bp, i) {
5748 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5749 bnx2x_fp(bp, i, status_blk_mapping),
5750 sizeof(struct host_status_block) +
5751 sizeof(struct eth_tx_db_data));
5753 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5754 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5755 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5756 bnx2x_fp(bp, i, tx_desc_mapping),
5757 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5759 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5760 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5761 bnx2x_fp(bp, i, rx_desc_mapping),
5762 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5764 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5765 bnx2x_fp(bp, i, rx_comp_mapping),
5766 sizeof(struct eth_fast_path_rx_cqe) *
5770 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5771 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5772 bnx2x_fp(bp, i, rx_sge_mapping),
5773 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5775 /* end of fastpath */
5777 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5778 sizeof(struct host_def_status_block));
5780 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5781 sizeof(struct bnx2x_slowpath));
5784 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5785 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);